1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * SLUB: A slab allocator that limits cache line use instead of queuing 4 * objects in per cpu and per node lists. 5 * 6 * The allocator synchronizes using per slab locks or atomic operations 7 * and only uses a centralized lock to manage a pool of partial slabs. 8 * 9 * (C) 2007 SGI, Christoph Lameter 10 * (C) 2011 Linux Foundation, Christoph Lameter 11 */ 12 13 #include <linux/mm.h> 14 #include <linux/swap.h> /* mm_account_reclaimed_pages() */ 15 #include <linux/module.h> 16 #include <linux/bit_spinlock.h> 17 #include <linux/interrupt.h> 18 #include <linux/swab.h> 19 #include <linux/bitops.h> 20 #include <linux/slab.h> 21 #include "slab.h" 22 #include <linux/proc_fs.h> 23 #include <linux/seq_file.h> 24 #include <linux/kasan.h> 25 #include <linux/kmsan.h> 26 #include <linux/cpu.h> 27 #include <linux/cpuset.h> 28 #include <linux/mempolicy.h> 29 #include <linux/ctype.h> 30 #include <linux/stackdepot.h> 31 #include <linux/debugobjects.h> 32 #include <linux/kallsyms.h> 33 #include <linux/kfence.h> 34 #include <linux/memory.h> 35 #include <linux/math64.h> 36 #include <linux/fault-inject.h> 37 #include <linux/kmemleak.h> 38 #include <linux/stacktrace.h> 39 #include <linux/prefetch.h> 40 #include <linux/memcontrol.h> 41 #include <linux/random.h> 42 #include <kunit/test.h> 43 #include <kunit/test-bug.h> 44 #include <linux/sort.h> 45 46 #include <linux/debugfs.h> 47 #include <trace/events/kmem.h> 48 49 #include "internal.h" 50 51 /* 52 * Lock order: 53 * 1. slab_mutex (Global Mutex) 54 * 2. node->list_lock (Spinlock) 55 * 3. kmem_cache->cpu_slab->lock (Local lock) 56 * 4. slab_lock(slab) (Only on some arches) 57 * 5. object_map_lock (Only for debugging) 58 * 59 * slab_mutex 60 * 61 * The role of the slab_mutex is to protect the list of all the slabs 62 * and to synchronize major metadata changes to slab cache structures. 63 * Also synchronizes memory hotplug callbacks. 64 * 65 * slab_lock 66 * 67 * The slab_lock is a wrapper around the page lock, thus it is a bit 68 * spinlock. 69 * 70 * The slab_lock is only used on arches that do not have the ability 71 * to do a cmpxchg_double. It only protects: 72 * 73 * A. slab->freelist -> List of free objects in a slab 74 * B. slab->inuse -> Number of objects in use 75 * C. slab->objects -> Number of objects in slab 76 * D. slab->frozen -> frozen state 77 * 78 * Frozen slabs 79 * 80 * If a slab is frozen then it is exempt from list management. It is 81 * the cpu slab which is actively allocated from by the processor that 82 * froze it and it is not on any list. The processor that froze the 83 * slab is the one who can perform list operations on the slab. Other 84 * processors may put objects onto the freelist but the processor that 85 * froze the slab is the only one that can retrieve the objects from the 86 * slab's freelist. 87 * 88 * CPU partial slabs 89 * 90 * The partially empty slabs cached on the CPU partial list are used 91 * for performance reasons, which speeds up the allocation process. 92 * These slabs are not frozen, but are also exempt from list management, 93 * by clearing the PG_workingset flag when moving out of the node 94 * partial list. Please see __slab_free() for more details. 95 * 96 * To sum up, the current scheme is: 97 * - node partial slab: PG_Workingset && !frozen 98 * - cpu partial slab: !PG_Workingset && !frozen 99 * - cpu slab: !PG_Workingset && frozen 100 * - full slab: !PG_Workingset && !frozen 101 * 102 * list_lock 103 * 104 * The list_lock protects the partial and full list on each node and 105 * the partial slab counter. If taken then no new slabs may be added or 106 * removed from the lists nor make the number of partial slabs be modified. 107 * (Note that the total number of slabs is an atomic value that may be 108 * modified without taking the list lock). 109 * 110 * The list_lock is a centralized lock and thus we avoid taking it as 111 * much as possible. As long as SLUB does not have to handle partial 112 * slabs, operations can continue without any centralized lock. F.e. 113 * allocating a long series of objects that fill up slabs does not require 114 * the list lock. 115 * 116 * For debug caches, all allocations are forced to go through a list_lock 117 * protected region to serialize against concurrent validation. 118 * 119 * cpu_slab->lock local lock 120 * 121 * This locks protect slowpath manipulation of all kmem_cache_cpu fields 122 * except the stat counters. This is a percpu structure manipulated only by 123 * the local cpu, so the lock protects against being preempted or interrupted 124 * by an irq. Fast path operations rely on lockless operations instead. 125 * 126 * On PREEMPT_RT, the local lock neither disables interrupts nor preemption 127 * which means the lockless fastpath cannot be used as it might interfere with 128 * an in-progress slow path operations. In this case the local lock is always 129 * taken but it still utilizes the freelist for the common operations. 130 * 131 * lockless fastpaths 132 * 133 * The fast path allocation (slab_alloc_node()) and freeing (do_slab_free()) 134 * are fully lockless when satisfied from the percpu slab (and when 135 * cmpxchg_double is possible to use, otherwise slab_lock is taken). 136 * They also don't disable preemption or migration or irqs. They rely on 137 * the transaction id (tid) field to detect being preempted or moved to 138 * another cpu. 139 * 140 * irq, preemption, migration considerations 141 * 142 * Interrupts are disabled as part of list_lock or local_lock operations, or 143 * around the slab_lock operation, in order to make the slab allocator safe 144 * to use in the context of an irq. 145 * 146 * In addition, preemption (or migration on PREEMPT_RT) is disabled in the 147 * allocation slowpath, bulk allocation, and put_cpu_partial(), so that the 148 * local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer 149 * doesn't have to be revalidated in each section protected by the local lock. 150 * 151 * SLUB assigns one slab for allocation to each processor. 152 * Allocations only occur from these slabs called cpu slabs. 153 * 154 * Slabs with free elements are kept on a partial list and during regular 155 * operations no list for full slabs is used. If an object in a full slab is 156 * freed then the slab will show up again on the partial lists. 157 * We track full slabs for debugging purposes though because otherwise we 158 * cannot scan all objects. 159 * 160 * Slabs are freed when they become empty. Teardown and setup is 161 * minimal so we rely on the page allocators per cpu caches for 162 * fast frees and allocs. 163 * 164 * slab->frozen The slab is frozen and exempt from list processing. 165 * This means that the slab is dedicated to a purpose 166 * such as satisfying allocations for a specific 167 * processor. Objects may be freed in the slab while 168 * it is frozen but slab_free will then skip the usual 169 * list operations. It is up to the processor holding 170 * the slab to integrate the slab into the slab lists 171 * when the slab is no longer needed. 172 * 173 * One use of this flag is to mark slabs that are 174 * used for allocations. Then such a slab becomes a cpu 175 * slab. The cpu slab may be equipped with an additional 176 * freelist that allows lockless access to 177 * free objects in addition to the regular freelist 178 * that requires the slab lock. 179 * 180 * SLAB_DEBUG_FLAGS Slab requires special handling due to debug 181 * options set. This moves slab handling out of 182 * the fast path and disables lockless freelists. 183 */ 184 185 /* 186 * We could simply use migrate_disable()/enable() but as long as it's a 187 * function call even on !PREEMPT_RT, use inline preempt_disable() there. 188 */ 189 #ifndef CONFIG_PREEMPT_RT 190 #define slub_get_cpu_ptr(var) get_cpu_ptr(var) 191 #define slub_put_cpu_ptr(var) put_cpu_ptr(var) 192 #define USE_LOCKLESS_FAST_PATH() (true) 193 #else 194 #define slub_get_cpu_ptr(var) \ 195 ({ \ 196 migrate_disable(); \ 197 this_cpu_ptr(var); \ 198 }) 199 #define slub_put_cpu_ptr(var) \ 200 do { \ 201 (void)(var); \ 202 migrate_enable(); \ 203 } while (0) 204 #define USE_LOCKLESS_FAST_PATH() (false) 205 #endif 206 207 #ifndef CONFIG_SLUB_TINY 208 #define __fastpath_inline __always_inline 209 #else 210 #define __fastpath_inline 211 #endif 212 213 #ifdef CONFIG_SLUB_DEBUG 214 #ifdef CONFIG_SLUB_DEBUG_ON 215 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled); 216 #else 217 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled); 218 #endif 219 #endif /* CONFIG_SLUB_DEBUG */ 220 221 /* Structure holding parameters for get_partial() call chain */ 222 struct partial_context { 223 gfp_t flags; 224 unsigned int orig_size; 225 void *object; 226 }; 227 228 static inline bool kmem_cache_debug(struct kmem_cache *s) 229 { 230 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); 231 } 232 233 static inline bool slub_debug_orig_size(struct kmem_cache *s) 234 { 235 return (kmem_cache_debug_flags(s, SLAB_STORE_USER) && 236 (s->flags & SLAB_KMALLOC)); 237 } 238 239 void *fixup_red_left(struct kmem_cache *s, void *p) 240 { 241 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) 242 p += s->red_left_pad; 243 244 return p; 245 } 246 247 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) 248 { 249 #ifdef CONFIG_SLUB_CPU_PARTIAL 250 return !kmem_cache_debug(s); 251 #else 252 return false; 253 #endif 254 } 255 256 /* 257 * Issues still to be resolved: 258 * 259 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 260 * 261 * - Variable sizing of the per node arrays 262 */ 263 264 /* Enable to log cmpxchg failures */ 265 #undef SLUB_DEBUG_CMPXCHG 266 267 #ifndef CONFIG_SLUB_TINY 268 /* 269 * Minimum number of partial slabs. These will be left on the partial 270 * lists even if they are empty. kmem_cache_shrink may reclaim them. 271 */ 272 #define MIN_PARTIAL 5 273 274 /* 275 * Maximum number of desirable partial slabs. 276 * The existence of more partial slabs makes kmem_cache_shrink 277 * sort the partial list by the number of objects in use. 278 */ 279 #define MAX_PARTIAL 10 280 #else 281 #define MIN_PARTIAL 0 282 #define MAX_PARTIAL 0 283 #endif 284 285 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \ 286 SLAB_POISON | SLAB_STORE_USER) 287 288 /* 289 * These debug flags cannot use CMPXCHG because there might be consistency 290 * issues when checking or reading debug information 291 */ 292 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \ 293 SLAB_TRACE) 294 295 296 /* 297 * Debugging flags that require metadata to be stored in the slab. These get 298 * disabled when slab_debug=O is used and a cache's min order increases with 299 * metadata. 300 */ 301 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 302 303 #define OO_SHIFT 16 304 #define OO_MASK ((1 << OO_SHIFT) - 1) 305 #define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */ 306 307 /* Internal SLUB flags */ 308 /* Poison object */ 309 #define __OBJECT_POISON __SLAB_FLAG_BIT(_SLAB_OBJECT_POISON) 310 /* Use cmpxchg_double */ 311 312 #ifdef system_has_freelist_aba 313 #define __CMPXCHG_DOUBLE __SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE) 314 #else 315 #define __CMPXCHG_DOUBLE __SLAB_FLAG_UNUSED 316 #endif 317 318 /* 319 * Tracking user of a slab. 320 */ 321 #define TRACK_ADDRS_COUNT 16 322 struct track { 323 unsigned long addr; /* Called from address */ 324 #ifdef CONFIG_STACKDEPOT 325 depot_stack_handle_t handle; 326 #endif 327 int cpu; /* Was running on cpu */ 328 int pid; /* Pid context */ 329 unsigned long when; /* When did the operation occur */ 330 }; 331 332 enum track_item { TRACK_ALLOC, TRACK_FREE }; 333 334 #ifdef SLAB_SUPPORTS_SYSFS 335 static int sysfs_slab_add(struct kmem_cache *); 336 static int sysfs_slab_alias(struct kmem_cache *, const char *); 337 #else 338 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 339 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 340 { return 0; } 341 #endif 342 343 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG) 344 static void debugfs_slab_add(struct kmem_cache *); 345 #else 346 static inline void debugfs_slab_add(struct kmem_cache *s) { } 347 #endif 348 349 enum stat_item { 350 ALLOC_FASTPATH, /* Allocation from cpu slab */ 351 ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ 352 FREE_FASTPATH, /* Free to cpu slab */ 353 FREE_SLOWPATH, /* Freeing not to cpu slab */ 354 FREE_FROZEN, /* Freeing to frozen slab */ 355 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ 356 FREE_REMOVE_PARTIAL, /* Freeing removes last object */ 357 ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */ 358 ALLOC_SLAB, /* Cpu slab acquired from page allocator */ 359 ALLOC_REFILL, /* Refill cpu slab from slab freelist */ 360 ALLOC_NODE_MISMATCH, /* Switching cpu slab */ 361 FREE_SLAB, /* Slab freed to the page allocator */ 362 CPUSLAB_FLUSH, /* Abandoning of the cpu slab */ 363 DEACTIVATE_FULL, /* Cpu slab was full when deactivated */ 364 DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */ 365 DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */ 366 DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */ 367 DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */ 368 DEACTIVATE_BYPASS, /* Implicit deactivation */ 369 ORDER_FALLBACK, /* Number of times fallback was necessary */ 370 CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */ 371 CMPXCHG_DOUBLE_FAIL, /* Failures of slab freelist update */ 372 CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */ 373 CPU_PARTIAL_FREE, /* Refill cpu partial on free */ 374 CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */ 375 CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ 376 NR_SLUB_STAT_ITEMS 377 }; 378 379 #ifndef CONFIG_SLUB_TINY 380 /* 381 * When changing the layout, make sure freelist and tid are still compatible 382 * with this_cpu_cmpxchg_double() alignment requirements. 383 */ 384 struct kmem_cache_cpu { 385 union { 386 struct { 387 void **freelist; /* Pointer to next available object */ 388 unsigned long tid; /* Globally unique transaction id */ 389 }; 390 freelist_aba_t freelist_tid; 391 }; 392 struct slab *slab; /* The slab from which we are allocating */ 393 #ifdef CONFIG_SLUB_CPU_PARTIAL 394 struct slab *partial; /* Partially allocated slabs */ 395 #endif 396 local_lock_t lock; /* Protects the fields above */ 397 #ifdef CONFIG_SLUB_STATS 398 unsigned int stat[NR_SLUB_STAT_ITEMS]; 399 #endif 400 }; 401 #endif /* CONFIG_SLUB_TINY */ 402 403 static inline void stat(const struct kmem_cache *s, enum stat_item si) 404 { 405 #ifdef CONFIG_SLUB_STATS 406 /* 407 * The rmw is racy on a preemptible kernel but this is acceptable, so 408 * avoid this_cpu_add()'s irq-disable overhead. 409 */ 410 raw_cpu_inc(s->cpu_slab->stat[si]); 411 #endif 412 } 413 414 static inline 415 void stat_add(const struct kmem_cache *s, enum stat_item si, int v) 416 { 417 #ifdef CONFIG_SLUB_STATS 418 raw_cpu_add(s->cpu_slab->stat[si], v); 419 #endif 420 } 421 422 /* 423 * The slab lists for all objects. 424 */ 425 struct kmem_cache_node { 426 spinlock_t list_lock; 427 unsigned long nr_partial; 428 struct list_head partial; 429 #ifdef CONFIG_SLUB_DEBUG 430 atomic_long_t nr_slabs; 431 atomic_long_t total_objects; 432 struct list_head full; 433 #endif 434 }; 435 436 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 437 { 438 return s->node[node]; 439 } 440 441 /* 442 * Iterator over all nodes. The body will be executed for each node that has 443 * a kmem_cache_node structure allocated (which is true for all online nodes) 444 */ 445 #define for_each_kmem_cache_node(__s, __node, __n) \ 446 for (__node = 0; __node < nr_node_ids; __node++) \ 447 if ((__n = get_node(__s, __node))) 448 449 /* 450 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated. 451 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily 452 * differ during memory hotplug/hotremove operations. 453 * Protected by slab_mutex. 454 */ 455 static nodemask_t slab_nodes; 456 457 #ifndef CONFIG_SLUB_TINY 458 /* 459 * Workqueue used for flush_cpu_slab(). 460 */ 461 static struct workqueue_struct *flushwq; 462 #endif 463 464 /******************************************************************** 465 * Core slab cache functions 466 *******************************************************************/ 467 468 /* 469 * Returns freelist pointer (ptr). With hardening, this is obfuscated 470 * with an XOR of the address where the pointer is held and a per-cache 471 * random number. 472 */ 473 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s, 474 void *ptr, unsigned long ptr_addr) 475 { 476 unsigned long encoded; 477 478 #ifdef CONFIG_SLAB_FREELIST_HARDENED 479 encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr); 480 #else 481 encoded = (unsigned long)ptr; 482 #endif 483 return (freeptr_t){.v = encoded}; 484 } 485 486 static inline void *freelist_ptr_decode(const struct kmem_cache *s, 487 freeptr_t ptr, unsigned long ptr_addr) 488 { 489 void *decoded; 490 491 #ifdef CONFIG_SLAB_FREELIST_HARDENED 492 decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr)); 493 #else 494 decoded = (void *)ptr.v; 495 #endif 496 return decoded; 497 } 498 499 static inline void *get_freepointer(struct kmem_cache *s, void *object) 500 { 501 unsigned long ptr_addr; 502 freeptr_t p; 503 504 object = kasan_reset_tag(object); 505 ptr_addr = (unsigned long)object + s->offset; 506 p = *(freeptr_t *)(ptr_addr); 507 return freelist_ptr_decode(s, p, ptr_addr); 508 } 509 510 #ifndef CONFIG_SLUB_TINY 511 static void prefetch_freepointer(const struct kmem_cache *s, void *object) 512 { 513 prefetchw(object + s->offset); 514 } 515 #endif 516 517 /* 518 * When running under KMSAN, get_freepointer_safe() may return an uninitialized 519 * pointer value in the case the current thread loses the race for the next 520 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in 521 * slab_alloc_node() will fail, so the uninitialized value won't be used, but 522 * KMSAN will still check all arguments of cmpxchg because of imperfect 523 * handling of inline assembly. 524 * To work around this problem, we apply __no_kmsan_checks to ensure that 525 * get_freepointer_safe() returns initialized memory. 526 */ 527 __no_kmsan_checks 528 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) 529 { 530 unsigned long freepointer_addr; 531 freeptr_t p; 532 533 if (!debug_pagealloc_enabled_static()) 534 return get_freepointer(s, object); 535 536 object = kasan_reset_tag(object); 537 freepointer_addr = (unsigned long)object + s->offset; 538 copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p)); 539 return freelist_ptr_decode(s, p, freepointer_addr); 540 } 541 542 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 543 { 544 unsigned long freeptr_addr = (unsigned long)object + s->offset; 545 546 #ifdef CONFIG_SLAB_FREELIST_HARDENED 547 BUG_ON(object == fp); /* naive detection of double free or corruption */ 548 #endif 549 550 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr); 551 *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr); 552 } 553 554 /* 555 * See comment in calculate_sizes(). 556 */ 557 static inline bool freeptr_outside_object(struct kmem_cache *s) 558 { 559 return s->offset >= s->inuse; 560 } 561 562 /* 563 * Return offset of the end of info block which is inuse + free pointer if 564 * not overlapping with object. 565 */ 566 static inline unsigned int get_info_end(struct kmem_cache *s) 567 { 568 if (freeptr_outside_object(s)) 569 return s->inuse + sizeof(void *); 570 else 571 return s->inuse; 572 } 573 574 /* Loop over all objects in a slab */ 575 #define for_each_object(__p, __s, __addr, __objects) \ 576 for (__p = fixup_red_left(__s, __addr); \ 577 __p < (__addr) + (__objects) * (__s)->size; \ 578 __p += (__s)->size) 579 580 static inline unsigned int order_objects(unsigned int order, unsigned int size) 581 { 582 return ((unsigned int)PAGE_SIZE << order) / size; 583 } 584 585 static inline struct kmem_cache_order_objects oo_make(unsigned int order, 586 unsigned int size) 587 { 588 struct kmem_cache_order_objects x = { 589 (order << OO_SHIFT) + order_objects(order, size) 590 }; 591 592 return x; 593 } 594 595 static inline unsigned int oo_order(struct kmem_cache_order_objects x) 596 { 597 return x.x >> OO_SHIFT; 598 } 599 600 static inline unsigned int oo_objects(struct kmem_cache_order_objects x) 601 { 602 return x.x & OO_MASK; 603 } 604 605 #ifdef CONFIG_SLUB_CPU_PARTIAL 606 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) 607 { 608 unsigned int nr_slabs; 609 610 s->cpu_partial = nr_objects; 611 612 /* 613 * We take the number of objects but actually limit the number of 614 * slabs on the per cpu partial list, in order to limit excessive 615 * growth of the list. For simplicity we assume that the slabs will 616 * be half-full. 617 */ 618 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo)); 619 s->cpu_partial_slabs = nr_slabs; 620 } 621 622 static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s) 623 { 624 return s->cpu_partial_slabs; 625 } 626 #else 627 static inline void 628 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) 629 { 630 } 631 632 static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s) 633 { 634 return 0; 635 } 636 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 637 638 /* 639 * Per slab locking using the pagelock 640 */ 641 static __always_inline void slab_lock(struct slab *slab) 642 { 643 bit_spin_lock(PG_locked, &slab->__page_flags); 644 } 645 646 static __always_inline void slab_unlock(struct slab *slab) 647 { 648 bit_spin_unlock(PG_locked, &slab->__page_flags); 649 } 650 651 static inline bool 652 __update_freelist_fast(struct slab *slab, 653 void *freelist_old, unsigned long counters_old, 654 void *freelist_new, unsigned long counters_new) 655 { 656 #ifdef system_has_freelist_aba 657 freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old }; 658 freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new }; 659 660 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); 661 #else 662 return false; 663 #endif 664 } 665 666 static inline bool 667 __update_freelist_slow(struct slab *slab, 668 void *freelist_old, unsigned long counters_old, 669 void *freelist_new, unsigned long counters_new) 670 { 671 bool ret = false; 672 673 slab_lock(slab); 674 if (slab->freelist == freelist_old && 675 slab->counters == counters_old) { 676 slab->freelist = freelist_new; 677 slab->counters = counters_new; 678 ret = true; 679 } 680 slab_unlock(slab); 681 682 return ret; 683 } 684 685 /* 686 * Interrupts must be disabled (for the fallback code to work right), typically 687 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is 688 * part of bit_spin_lock(), is sufficient because the policy is not to allow any 689 * allocation/ free operation in hardirq context. Therefore nothing can 690 * interrupt the operation. 691 */ 692 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab, 693 void *freelist_old, unsigned long counters_old, 694 void *freelist_new, unsigned long counters_new, 695 const char *n) 696 { 697 bool ret; 698 699 if (USE_LOCKLESS_FAST_PATH()) 700 lockdep_assert_irqs_disabled(); 701 702 if (s->flags & __CMPXCHG_DOUBLE) { 703 ret = __update_freelist_fast(slab, freelist_old, counters_old, 704 freelist_new, counters_new); 705 } else { 706 ret = __update_freelist_slow(slab, freelist_old, counters_old, 707 freelist_new, counters_new); 708 } 709 if (likely(ret)) 710 return true; 711 712 cpu_relax(); 713 stat(s, CMPXCHG_DOUBLE_FAIL); 714 715 #ifdef SLUB_DEBUG_CMPXCHG 716 pr_info("%s %s: cmpxchg double redo ", n, s->name); 717 #endif 718 719 return false; 720 } 721 722 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, 723 void *freelist_old, unsigned long counters_old, 724 void *freelist_new, unsigned long counters_new, 725 const char *n) 726 { 727 bool ret; 728 729 if (s->flags & __CMPXCHG_DOUBLE) { 730 ret = __update_freelist_fast(slab, freelist_old, counters_old, 731 freelist_new, counters_new); 732 } else { 733 unsigned long flags; 734 735 local_irq_save(flags); 736 ret = __update_freelist_slow(slab, freelist_old, counters_old, 737 freelist_new, counters_new); 738 local_irq_restore(flags); 739 } 740 if (likely(ret)) 741 return true; 742 743 cpu_relax(); 744 stat(s, CMPXCHG_DOUBLE_FAIL); 745 746 #ifdef SLUB_DEBUG_CMPXCHG 747 pr_info("%s %s: cmpxchg double redo ", n, s->name); 748 #endif 749 750 return false; 751 } 752 753 /* 754 * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API 755 * family will round up the real request size to these fixed ones, so 756 * there could be an extra area than what is requested. Save the original 757 * request size in the meta data area, for better debug and sanity check. 758 */ 759 static inline void set_orig_size(struct kmem_cache *s, 760 void *object, unsigned int orig_size) 761 { 762 void *p = kasan_reset_tag(object); 763 unsigned int kasan_meta_size; 764 765 if (!slub_debug_orig_size(s)) 766 return; 767 768 /* 769 * KASAN can save its free meta data inside of the object at offset 0. 770 * If this meta data size is larger than 'orig_size', it will overlap 771 * the data redzone in [orig_size+1, object_size]. Thus, we adjust 772 * 'orig_size' to be as at least as big as KASAN's meta data. 773 */ 774 kasan_meta_size = kasan_metadata_size(s, true); 775 if (kasan_meta_size > orig_size) 776 orig_size = kasan_meta_size; 777 778 p += get_info_end(s); 779 p += sizeof(struct track) * 2; 780 781 *(unsigned int *)p = orig_size; 782 } 783 784 static inline unsigned int get_orig_size(struct kmem_cache *s, void *object) 785 { 786 void *p = kasan_reset_tag(object); 787 788 if (!slub_debug_orig_size(s)) 789 return s->object_size; 790 791 p += get_info_end(s); 792 p += sizeof(struct track) * 2; 793 794 return *(unsigned int *)p; 795 } 796 797 #ifdef CONFIG_SLUB_DEBUG 798 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; 799 static DEFINE_SPINLOCK(object_map_lock); 800 801 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s, 802 struct slab *slab) 803 { 804 void *addr = slab_address(slab); 805 void *p; 806 807 bitmap_zero(obj_map, slab->objects); 808 809 for (p = slab->freelist; p; p = get_freepointer(s, p)) 810 set_bit(__obj_to_index(s, addr, p), obj_map); 811 } 812 813 #if IS_ENABLED(CONFIG_KUNIT) 814 static bool slab_add_kunit_errors(void) 815 { 816 struct kunit_resource *resource; 817 818 if (!kunit_get_current_test()) 819 return false; 820 821 resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); 822 if (!resource) 823 return false; 824 825 (*(int *)resource->data)++; 826 kunit_put_resource(resource); 827 return true; 828 } 829 830 bool slab_in_kunit_test(void) 831 { 832 struct kunit_resource *resource; 833 834 if (!kunit_get_current_test()) 835 return false; 836 837 resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); 838 if (!resource) 839 return false; 840 841 kunit_put_resource(resource); 842 return true; 843 } 844 #else 845 static inline bool slab_add_kunit_errors(void) { return false; } 846 #endif 847 848 static inline unsigned int size_from_object(struct kmem_cache *s) 849 { 850 if (s->flags & SLAB_RED_ZONE) 851 return s->size - s->red_left_pad; 852 853 return s->size; 854 } 855 856 static inline void *restore_red_left(struct kmem_cache *s, void *p) 857 { 858 if (s->flags & SLAB_RED_ZONE) 859 p -= s->red_left_pad; 860 861 return p; 862 } 863 864 /* 865 * Debug settings: 866 */ 867 #if defined(CONFIG_SLUB_DEBUG_ON) 868 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; 869 #else 870 static slab_flags_t slub_debug; 871 #endif 872 873 static char *slub_debug_string; 874 static int disable_higher_order_debug; 875 876 /* 877 * slub is about to manipulate internal object metadata. This memory lies 878 * outside the range of the allocated object, so accessing it would normally 879 * be reported by kasan as a bounds error. metadata_access_enable() is used 880 * to tell kasan that these accesses are OK. 881 */ 882 static inline void metadata_access_enable(void) 883 { 884 kasan_disable_current(); 885 kmsan_disable_current(); 886 } 887 888 static inline void metadata_access_disable(void) 889 { 890 kmsan_enable_current(); 891 kasan_enable_current(); 892 } 893 894 /* 895 * Object debugging 896 */ 897 898 /* Verify that a pointer has an address that is valid within a slab page */ 899 static inline int check_valid_pointer(struct kmem_cache *s, 900 struct slab *slab, void *object) 901 { 902 void *base; 903 904 if (!object) 905 return 1; 906 907 base = slab_address(slab); 908 object = kasan_reset_tag(object); 909 object = restore_red_left(s, object); 910 if (object < base || object >= base + slab->objects * s->size || 911 (object - base) % s->size) { 912 return 0; 913 } 914 915 return 1; 916 } 917 918 static void print_section(char *level, char *text, u8 *addr, 919 unsigned int length) 920 { 921 metadata_access_enable(); 922 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 923 16, 1, kasan_reset_tag((void *)addr), length, 1); 924 metadata_access_disable(); 925 } 926 927 static struct track *get_track(struct kmem_cache *s, void *object, 928 enum track_item alloc) 929 { 930 struct track *p; 931 932 p = object + get_info_end(s); 933 934 return kasan_reset_tag(p + alloc); 935 } 936 937 #ifdef CONFIG_STACKDEPOT 938 static noinline depot_stack_handle_t set_track_prepare(void) 939 { 940 depot_stack_handle_t handle; 941 unsigned long entries[TRACK_ADDRS_COUNT]; 942 unsigned int nr_entries; 943 944 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3); 945 handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); 946 947 return handle; 948 } 949 #else 950 static inline depot_stack_handle_t set_track_prepare(void) 951 { 952 return 0; 953 } 954 #endif 955 956 static void set_track_update(struct kmem_cache *s, void *object, 957 enum track_item alloc, unsigned long addr, 958 depot_stack_handle_t handle) 959 { 960 struct track *p = get_track(s, object, alloc); 961 962 #ifdef CONFIG_STACKDEPOT 963 p->handle = handle; 964 #endif 965 p->addr = addr; 966 p->cpu = smp_processor_id(); 967 p->pid = current->pid; 968 p->when = jiffies; 969 } 970 971 static __always_inline void set_track(struct kmem_cache *s, void *object, 972 enum track_item alloc, unsigned long addr) 973 { 974 depot_stack_handle_t handle = set_track_prepare(); 975 976 set_track_update(s, object, alloc, addr, handle); 977 } 978 979 static void init_tracking(struct kmem_cache *s, void *object) 980 { 981 struct track *p; 982 983 if (!(s->flags & SLAB_STORE_USER)) 984 return; 985 986 p = get_track(s, object, TRACK_ALLOC); 987 memset(p, 0, 2*sizeof(struct track)); 988 } 989 990 static void print_track(const char *s, struct track *t, unsigned long pr_time) 991 { 992 depot_stack_handle_t handle __maybe_unused; 993 994 if (!t->addr) 995 return; 996 997 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n", 998 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); 999 #ifdef CONFIG_STACKDEPOT 1000 handle = READ_ONCE(t->handle); 1001 if (handle) 1002 stack_depot_print(handle); 1003 else 1004 pr_err("object allocation/free stack trace missing\n"); 1005 #endif 1006 } 1007 1008 void print_tracking(struct kmem_cache *s, void *object) 1009 { 1010 unsigned long pr_time = jiffies; 1011 if (!(s->flags & SLAB_STORE_USER)) 1012 return; 1013 1014 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); 1015 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); 1016 } 1017 1018 static void print_slab_info(const struct slab *slab) 1019 { 1020 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n", 1021 slab, slab->objects, slab->inuse, slab->freelist, 1022 &slab->__page_flags); 1023 } 1024 1025 void skip_orig_size_check(struct kmem_cache *s, const void *object) 1026 { 1027 set_orig_size(s, (void *)object, s->object_size); 1028 } 1029 1030 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 1031 { 1032 struct va_format vaf; 1033 va_list args; 1034 1035 va_start(args, fmt); 1036 vaf.fmt = fmt; 1037 vaf.va = &args; 1038 pr_err("=============================================================================\n"); 1039 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); 1040 pr_err("-----------------------------------------------------------------------------\n\n"); 1041 va_end(args); 1042 } 1043 1044 __printf(2, 3) 1045 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 1046 { 1047 struct va_format vaf; 1048 va_list args; 1049 1050 if (slab_add_kunit_errors()) 1051 return; 1052 1053 va_start(args, fmt); 1054 vaf.fmt = fmt; 1055 vaf.va = &args; 1056 pr_err("FIX %s: %pV\n", s->name, &vaf); 1057 va_end(args); 1058 } 1059 1060 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) 1061 { 1062 unsigned int off; /* Offset of last byte */ 1063 u8 *addr = slab_address(slab); 1064 1065 print_tracking(s, p); 1066 1067 print_slab_info(slab); 1068 1069 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n", 1070 p, p - addr, get_freepointer(s, p)); 1071 1072 if (s->flags & SLAB_RED_ZONE) 1073 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, 1074 s->red_left_pad); 1075 else if (p > addr + 16) 1076 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); 1077 1078 print_section(KERN_ERR, "Object ", p, 1079 min_t(unsigned int, s->object_size, PAGE_SIZE)); 1080 if (s->flags & SLAB_RED_ZONE) 1081 print_section(KERN_ERR, "Redzone ", p + s->object_size, 1082 s->inuse - s->object_size); 1083 1084 off = get_info_end(s); 1085 1086 if (s->flags & SLAB_STORE_USER) 1087 off += 2 * sizeof(struct track); 1088 1089 if (slub_debug_orig_size(s)) 1090 off += sizeof(unsigned int); 1091 1092 off += kasan_metadata_size(s, false); 1093 1094 if (off != size_from_object(s)) 1095 /* Beginning of the filler is the free pointer */ 1096 print_section(KERN_ERR, "Padding ", p + off, 1097 size_from_object(s) - off); 1098 1099 dump_stack(); 1100 } 1101 1102 static void object_err(struct kmem_cache *s, struct slab *slab, 1103 u8 *object, char *reason) 1104 { 1105 if (slab_add_kunit_errors()) 1106 return; 1107 1108 slab_bug(s, "%s", reason); 1109 print_trailer(s, slab, object); 1110 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1111 } 1112 1113 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 1114 void **freelist, void *nextfree) 1115 { 1116 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && 1117 !check_valid_pointer(s, slab, nextfree) && freelist) { 1118 object_err(s, slab, *freelist, "Freechain corrupt"); 1119 *freelist = NULL; 1120 slab_fix(s, "Isolate corrupted freechain"); 1121 return true; 1122 } 1123 1124 return false; 1125 } 1126 1127 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab, 1128 const char *fmt, ...) 1129 { 1130 va_list args; 1131 char buf[100]; 1132 1133 if (slab_add_kunit_errors()) 1134 return; 1135 1136 va_start(args, fmt); 1137 vsnprintf(buf, sizeof(buf), fmt, args); 1138 va_end(args); 1139 slab_bug(s, "%s", buf); 1140 print_slab_info(slab); 1141 dump_stack(); 1142 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1143 } 1144 1145 static void init_object(struct kmem_cache *s, void *object, u8 val) 1146 { 1147 u8 *p = kasan_reset_tag(object); 1148 unsigned int poison_size = s->object_size; 1149 1150 if (s->flags & SLAB_RED_ZONE) { 1151 /* 1152 * Here and below, avoid overwriting the KMSAN shadow. Keeping 1153 * the shadow makes it possible to distinguish uninit-value 1154 * from use-after-free. 1155 */ 1156 memset_no_sanitize_memory(p - s->red_left_pad, val, 1157 s->red_left_pad); 1158 1159 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { 1160 /* 1161 * Redzone the extra allocated space by kmalloc than 1162 * requested, and the poison size will be limited to 1163 * the original request size accordingly. 1164 */ 1165 poison_size = get_orig_size(s, object); 1166 } 1167 } 1168 1169 if (s->flags & __OBJECT_POISON) { 1170 memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1); 1171 memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1); 1172 } 1173 1174 if (s->flags & SLAB_RED_ZONE) 1175 memset_no_sanitize_memory(p + poison_size, val, 1176 s->inuse - poison_size); 1177 } 1178 1179 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 1180 void *from, void *to) 1181 { 1182 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); 1183 memset(from, data, to - from); 1184 } 1185 1186 #ifdef CONFIG_KMSAN 1187 #define pad_check_attributes noinline __no_kmsan_checks 1188 #else 1189 #define pad_check_attributes 1190 #endif 1191 1192 static pad_check_attributes int 1193 check_bytes_and_report(struct kmem_cache *s, struct slab *slab, 1194 u8 *object, char *what, 1195 u8 *start, unsigned int value, unsigned int bytes) 1196 { 1197 u8 *fault; 1198 u8 *end; 1199 u8 *addr = slab_address(slab); 1200 1201 metadata_access_enable(); 1202 fault = memchr_inv(kasan_reset_tag(start), value, bytes); 1203 metadata_access_disable(); 1204 if (!fault) 1205 return 1; 1206 1207 end = start + bytes; 1208 while (end > fault && end[-1] == value) 1209 end--; 1210 1211 if (slab_add_kunit_errors()) 1212 goto skip_bug_print; 1213 1214 slab_bug(s, "%s overwritten", what); 1215 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", 1216 fault, end - 1, fault - addr, 1217 fault[0], value); 1218 1219 skip_bug_print: 1220 restore_bytes(s, what, value, fault, end); 1221 return 0; 1222 } 1223 1224 /* 1225 * Object layout: 1226 * 1227 * object address 1228 * Bytes of the object to be managed. 1229 * If the freepointer may overlay the object then the free 1230 * pointer is at the middle of the object. 1231 * 1232 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 1233 * 0xa5 (POISON_END) 1234 * 1235 * object + s->object_size 1236 * Padding to reach word boundary. This is also used for Redzoning. 1237 * Padding is extended by another word if Redzoning is enabled and 1238 * object_size == inuse. 1239 * 1240 * We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with 1241 * 0xcc (SLUB_RED_ACTIVE) for objects in use. 1242 * 1243 * object + s->inuse 1244 * Meta data starts here. 1245 * 1246 * A. Free pointer (if we cannot overwrite object on free) 1247 * B. Tracking data for SLAB_STORE_USER 1248 * C. Original request size for kmalloc object (SLAB_STORE_USER enabled) 1249 * D. Padding to reach required alignment boundary or at minimum 1250 * one word if debugging is on to be able to detect writes 1251 * before the word boundary. 1252 * 1253 * Padding is done using 0x5a (POISON_INUSE) 1254 * 1255 * object + s->size 1256 * Nothing is used beyond s->size. 1257 * 1258 * If slabcaches are merged then the object_size and inuse boundaries are mostly 1259 * ignored. And therefore no slab options that rely on these boundaries 1260 * may be used with merged slabcaches. 1261 */ 1262 1263 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) 1264 { 1265 unsigned long off = get_info_end(s); /* The end of info */ 1266 1267 if (s->flags & SLAB_STORE_USER) { 1268 /* We also have user information there */ 1269 off += 2 * sizeof(struct track); 1270 1271 if (s->flags & SLAB_KMALLOC) 1272 off += sizeof(unsigned int); 1273 } 1274 1275 off += kasan_metadata_size(s, false); 1276 1277 if (size_from_object(s) == off) 1278 return 1; 1279 1280 return check_bytes_and_report(s, slab, p, "Object padding", 1281 p + off, POISON_INUSE, size_from_object(s) - off); 1282 } 1283 1284 /* Check the pad bytes at the end of a slab page */ 1285 static pad_check_attributes void 1286 slab_pad_check(struct kmem_cache *s, struct slab *slab) 1287 { 1288 u8 *start; 1289 u8 *fault; 1290 u8 *end; 1291 u8 *pad; 1292 int length; 1293 int remainder; 1294 1295 if (!(s->flags & SLAB_POISON)) 1296 return; 1297 1298 start = slab_address(slab); 1299 length = slab_size(slab); 1300 end = start + length; 1301 remainder = length % s->size; 1302 if (!remainder) 1303 return; 1304 1305 pad = end - remainder; 1306 metadata_access_enable(); 1307 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder); 1308 metadata_access_disable(); 1309 if (!fault) 1310 return; 1311 while (end > fault && end[-1] == POISON_INUSE) 1312 end--; 1313 1314 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu", 1315 fault, end - 1, fault - start); 1316 print_section(KERN_ERR, "Padding ", pad, remainder); 1317 1318 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); 1319 } 1320 1321 static int check_object(struct kmem_cache *s, struct slab *slab, 1322 void *object, u8 val) 1323 { 1324 u8 *p = object; 1325 u8 *endobject = object + s->object_size; 1326 unsigned int orig_size, kasan_meta_size; 1327 int ret = 1; 1328 1329 if (s->flags & SLAB_RED_ZONE) { 1330 if (!check_bytes_and_report(s, slab, object, "Left Redzone", 1331 object - s->red_left_pad, val, s->red_left_pad)) 1332 ret = 0; 1333 1334 if (!check_bytes_and_report(s, slab, object, "Right Redzone", 1335 endobject, val, s->inuse - s->object_size)) 1336 ret = 0; 1337 1338 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { 1339 orig_size = get_orig_size(s, object); 1340 1341 if (s->object_size > orig_size && 1342 !check_bytes_and_report(s, slab, object, 1343 "kmalloc Redzone", p + orig_size, 1344 val, s->object_size - orig_size)) { 1345 ret = 0; 1346 } 1347 } 1348 } else { 1349 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 1350 if (!check_bytes_and_report(s, slab, p, "Alignment padding", 1351 endobject, POISON_INUSE, 1352 s->inuse - s->object_size)) 1353 ret = 0; 1354 } 1355 } 1356 1357 if (s->flags & SLAB_POISON) { 1358 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) { 1359 /* 1360 * KASAN can save its free meta data inside of the 1361 * object at offset 0. Thus, skip checking the part of 1362 * the redzone that overlaps with the meta data. 1363 */ 1364 kasan_meta_size = kasan_metadata_size(s, true); 1365 if (kasan_meta_size < s->object_size - 1 && 1366 !check_bytes_and_report(s, slab, p, "Poison", 1367 p + kasan_meta_size, POISON_FREE, 1368 s->object_size - kasan_meta_size - 1)) 1369 ret = 0; 1370 if (kasan_meta_size < s->object_size && 1371 !check_bytes_and_report(s, slab, p, "End Poison", 1372 p + s->object_size - 1, POISON_END, 1)) 1373 ret = 0; 1374 } 1375 /* 1376 * check_pad_bytes cleans up on its own. 1377 */ 1378 if (!check_pad_bytes(s, slab, p)) 1379 ret = 0; 1380 } 1381 1382 /* 1383 * Cannot check freepointer while object is allocated if 1384 * object and freepointer overlap. 1385 */ 1386 if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) && 1387 !check_valid_pointer(s, slab, get_freepointer(s, p))) { 1388 object_err(s, slab, p, "Freepointer corrupt"); 1389 /* 1390 * No choice but to zap it and thus lose the remainder 1391 * of the free objects in this slab. May cause 1392 * another error because the object count is now wrong. 1393 */ 1394 set_freepointer(s, p, NULL); 1395 ret = 0; 1396 } 1397 1398 if (!ret && !slab_in_kunit_test()) { 1399 print_trailer(s, slab, object); 1400 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1401 } 1402 1403 return ret; 1404 } 1405 1406 static int check_slab(struct kmem_cache *s, struct slab *slab) 1407 { 1408 int maxobj; 1409 1410 if (!folio_test_slab(slab_folio(slab))) { 1411 slab_err(s, slab, "Not a valid slab page"); 1412 return 0; 1413 } 1414 1415 maxobj = order_objects(slab_order(slab), s->size); 1416 if (slab->objects > maxobj) { 1417 slab_err(s, slab, "objects %u > max %u", 1418 slab->objects, maxobj); 1419 return 0; 1420 } 1421 if (slab->inuse > slab->objects) { 1422 slab_err(s, slab, "inuse %u > max %u", 1423 slab->inuse, slab->objects); 1424 return 0; 1425 } 1426 /* Slab_pad_check fixes things up after itself */ 1427 slab_pad_check(s, slab); 1428 return 1; 1429 } 1430 1431 /* 1432 * Determine if a certain object in a slab is on the freelist. Must hold the 1433 * slab lock to guarantee that the chains are in a consistent state. 1434 */ 1435 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search) 1436 { 1437 int nr = 0; 1438 void *fp; 1439 void *object = NULL; 1440 int max_objects; 1441 1442 fp = slab->freelist; 1443 while (fp && nr <= slab->objects) { 1444 if (fp == search) 1445 return 1; 1446 if (!check_valid_pointer(s, slab, fp)) { 1447 if (object) { 1448 object_err(s, slab, object, 1449 "Freechain corrupt"); 1450 set_freepointer(s, object, NULL); 1451 } else { 1452 slab_err(s, slab, "Freepointer corrupt"); 1453 slab->freelist = NULL; 1454 slab->inuse = slab->objects; 1455 slab_fix(s, "Freelist cleared"); 1456 return 0; 1457 } 1458 break; 1459 } 1460 object = fp; 1461 fp = get_freepointer(s, object); 1462 nr++; 1463 } 1464 1465 max_objects = order_objects(slab_order(slab), s->size); 1466 if (max_objects > MAX_OBJS_PER_PAGE) 1467 max_objects = MAX_OBJS_PER_PAGE; 1468 1469 if (slab->objects != max_objects) { 1470 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", 1471 slab->objects, max_objects); 1472 slab->objects = max_objects; 1473 slab_fix(s, "Number of objects adjusted"); 1474 } 1475 if (slab->inuse != slab->objects - nr) { 1476 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", 1477 slab->inuse, slab->objects - nr); 1478 slab->inuse = slab->objects - nr; 1479 slab_fix(s, "Object count adjusted"); 1480 } 1481 return search == NULL; 1482 } 1483 1484 static void trace(struct kmem_cache *s, struct slab *slab, void *object, 1485 int alloc) 1486 { 1487 if (s->flags & SLAB_TRACE) { 1488 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 1489 s->name, 1490 alloc ? "alloc" : "free", 1491 object, slab->inuse, 1492 slab->freelist); 1493 1494 if (!alloc) 1495 print_section(KERN_INFO, "Object ", (void *)object, 1496 s->object_size); 1497 1498 dump_stack(); 1499 } 1500 } 1501 1502 /* 1503 * Tracking of fully allocated slabs for debugging purposes. 1504 */ 1505 static void add_full(struct kmem_cache *s, 1506 struct kmem_cache_node *n, struct slab *slab) 1507 { 1508 if (!(s->flags & SLAB_STORE_USER)) 1509 return; 1510 1511 lockdep_assert_held(&n->list_lock); 1512 list_add(&slab->slab_list, &n->full); 1513 } 1514 1515 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab) 1516 { 1517 if (!(s->flags & SLAB_STORE_USER)) 1518 return; 1519 1520 lockdep_assert_held(&n->list_lock); 1521 list_del(&slab->slab_list); 1522 } 1523 1524 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1525 { 1526 return atomic_long_read(&n->nr_slabs); 1527 } 1528 1529 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 1530 { 1531 struct kmem_cache_node *n = get_node(s, node); 1532 1533 atomic_long_inc(&n->nr_slabs); 1534 atomic_long_add(objects, &n->total_objects); 1535 } 1536 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 1537 { 1538 struct kmem_cache_node *n = get_node(s, node); 1539 1540 atomic_long_dec(&n->nr_slabs); 1541 atomic_long_sub(objects, &n->total_objects); 1542 } 1543 1544 /* Object debug checks for alloc/free paths */ 1545 static void setup_object_debug(struct kmem_cache *s, void *object) 1546 { 1547 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) 1548 return; 1549 1550 init_object(s, object, SLUB_RED_INACTIVE); 1551 init_tracking(s, object); 1552 } 1553 1554 static 1555 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) 1556 { 1557 if (!kmem_cache_debug_flags(s, SLAB_POISON)) 1558 return; 1559 1560 metadata_access_enable(); 1561 memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab)); 1562 metadata_access_disable(); 1563 } 1564 1565 static inline int alloc_consistency_checks(struct kmem_cache *s, 1566 struct slab *slab, void *object) 1567 { 1568 if (!check_slab(s, slab)) 1569 return 0; 1570 1571 if (!check_valid_pointer(s, slab, object)) { 1572 object_err(s, slab, object, "Freelist Pointer check fails"); 1573 return 0; 1574 } 1575 1576 if (!check_object(s, slab, object, SLUB_RED_INACTIVE)) 1577 return 0; 1578 1579 return 1; 1580 } 1581 1582 static noinline bool alloc_debug_processing(struct kmem_cache *s, 1583 struct slab *slab, void *object, int orig_size) 1584 { 1585 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1586 if (!alloc_consistency_checks(s, slab, object)) 1587 goto bad; 1588 } 1589 1590 /* Success. Perform special debug activities for allocs */ 1591 trace(s, slab, object, 1); 1592 set_orig_size(s, object, orig_size); 1593 init_object(s, object, SLUB_RED_ACTIVE); 1594 return true; 1595 1596 bad: 1597 if (folio_test_slab(slab_folio(slab))) { 1598 /* 1599 * If this is a slab page then lets do the best we can 1600 * to avoid issues in the future. Marking all objects 1601 * as used avoids touching the remaining objects. 1602 */ 1603 slab_fix(s, "Marking all objects used"); 1604 slab->inuse = slab->objects; 1605 slab->freelist = NULL; 1606 } 1607 return false; 1608 } 1609 1610 static inline int free_consistency_checks(struct kmem_cache *s, 1611 struct slab *slab, void *object, unsigned long addr) 1612 { 1613 if (!check_valid_pointer(s, slab, object)) { 1614 slab_err(s, slab, "Invalid object pointer 0x%p", object); 1615 return 0; 1616 } 1617 1618 if (on_freelist(s, slab, object)) { 1619 object_err(s, slab, object, "Object already free"); 1620 return 0; 1621 } 1622 1623 if (!check_object(s, slab, object, SLUB_RED_ACTIVE)) 1624 return 0; 1625 1626 if (unlikely(s != slab->slab_cache)) { 1627 if (!folio_test_slab(slab_folio(slab))) { 1628 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", 1629 object); 1630 } else if (!slab->slab_cache) { 1631 pr_err("SLUB <none>: no slab for object 0x%p.\n", 1632 object); 1633 dump_stack(); 1634 } else 1635 object_err(s, slab, object, 1636 "page slab pointer corrupt."); 1637 return 0; 1638 } 1639 return 1; 1640 } 1641 1642 /* 1643 * Parse a block of slab_debug options. Blocks are delimited by ';' 1644 * 1645 * @str: start of block 1646 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified 1647 * @slabs: return start of list of slabs, or NULL when there's no list 1648 * @init: assume this is initial parsing and not per-kmem-create parsing 1649 * 1650 * returns the start of next block if there's any, or NULL 1651 */ 1652 static char * 1653 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init) 1654 { 1655 bool higher_order_disable = false; 1656 1657 /* Skip any completely empty blocks */ 1658 while (*str && *str == ';') 1659 str++; 1660 1661 if (*str == ',') { 1662 /* 1663 * No options but restriction on slabs. This means full 1664 * debugging for slabs matching a pattern. 1665 */ 1666 *flags = DEBUG_DEFAULT_FLAGS; 1667 goto check_slabs; 1668 } 1669 *flags = 0; 1670 1671 /* Determine which debug features should be switched on */ 1672 for (; *str && *str != ',' && *str != ';'; str++) { 1673 switch (tolower(*str)) { 1674 case '-': 1675 *flags = 0; 1676 break; 1677 case 'f': 1678 *flags |= SLAB_CONSISTENCY_CHECKS; 1679 break; 1680 case 'z': 1681 *flags |= SLAB_RED_ZONE; 1682 break; 1683 case 'p': 1684 *flags |= SLAB_POISON; 1685 break; 1686 case 'u': 1687 *flags |= SLAB_STORE_USER; 1688 break; 1689 case 't': 1690 *flags |= SLAB_TRACE; 1691 break; 1692 case 'a': 1693 *flags |= SLAB_FAILSLAB; 1694 break; 1695 case 'o': 1696 /* 1697 * Avoid enabling debugging on caches if its minimum 1698 * order would increase as a result. 1699 */ 1700 higher_order_disable = true; 1701 break; 1702 default: 1703 if (init) 1704 pr_err("slab_debug option '%c' unknown. skipped\n", *str); 1705 } 1706 } 1707 check_slabs: 1708 if (*str == ',') 1709 *slabs = ++str; 1710 else 1711 *slabs = NULL; 1712 1713 /* Skip over the slab list */ 1714 while (*str && *str != ';') 1715 str++; 1716 1717 /* Skip any completely empty blocks */ 1718 while (*str && *str == ';') 1719 str++; 1720 1721 if (init && higher_order_disable) 1722 disable_higher_order_debug = 1; 1723 1724 if (*str) 1725 return str; 1726 else 1727 return NULL; 1728 } 1729 1730 static int __init setup_slub_debug(char *str) 1731 { 1732 slab_flags_t flags; 1733 slab_flags_t global_flags; 1734 char *saved_str; 1735 char *slab_list; 1736 bool global_slub_debug_changed = false; 1737 bool slab_list_specified = false; 1738 1739 global_flags = DEBUG_DEFAULT_FLAGS; 1740 if (*str++ != '=' || !*str) 1741 /* 1742 * No options specified. Switch on full debugging. 1743 */ 1744 goto out; 1745 1746 saved_str = str; 1747 while (str) { 1748 str = parse_slub_debug_flags(str, &flags, &slab_list, true); 1749 1750 if (!slab_list) { 1751 global_flags = flags; 1752 global_slub_debug_changed = true; 1753 } else { 1754 slab_list_specified = true; 1755 if (flags & SLAB_STORE_USER) 1756 stack_depot_request_early_init(); 1757 } 1758 } 1759 1760 /* 1761 * For backwards compatibility, a single list of flags with list of 1762 * slabs means debugging is only changed for those slabs, so the global 1763 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending 1764 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as 1765 * long as there is no option specifying flags without a slab list. 1766 */ 1767 if (slab_list_specified) { 1768 if (!global_slub_debug_changed) 1769 global_flags = slub_debug; 1770 slub_debug_string = saved_str; 1771 } 1772 out: 1773 slub_debug = global_flags; 1774 if (slub_debug & SLAB_STORE_USER) 1775 stack_depot_request_early_init(); 1776 if (slub_debug != 0 || slub_debug_string) 1777 static_branch_enable(&slub_debug_enabled); 1778 else 1779 static_branch_disable(&slub_debug_enabled); 1780 if ((static_branch_unlikely(&init_on_alloc) || 1781 static_branch_unlikely(&init_on_free)) && 1782 (slub_debug & SLAB_POISON)) 1783 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); 1784 return 1; 1785 } 1786 1787 __setup("slab_debug", setup_slub_debug); 1788 __setup_param("slub_debug", slub_debug, setup_slub_debug, 0); 1789 1790 /* 1791 * kmem_cache_flags - apply debugging options to the cache 1792 * @flags: flags to set 1793 * @name: name of the cache 1794 * 1795 * Debug option(s) are applied to @flags. In addition to the debug 1796 * option(s), if a slab name (or multiple) is specified i.e. 1797 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ... 1798 * then only the select slabs will receive the debug option(s). 1799 */ 1800 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) 1801 { 1802 char *iter; 1803 size_t len; 1804 char *next_block; 1805 slab_flags_t block_flags; 1806 slab_flags_t slub_debug_local = slub_debug; 1807 1808 if (flags & SLAB_NO_USER_FLAGS) 1809 return flags; 1810 1811 /* 1812 * If the slab cache is for debugging (e.g. kmemleak) then 1813 * don't store user (stack trace) information by default, 1814 * but let the user enable it via the command line below. 1815 */ 1816 if (flags & SLAB_NOLEAKTRACE) 1817 slub_debug_local &= ~SLAB_STORE_USER; 1818 1819 len = strlen(name); 1820 next_block = slub_debug_string; 1821 /* Go through all blocks of debug options, see if any matches our slab's name */ 1822 while (next_block) { 1823 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); 1824 if (!iter) 1825 continue; 1826 /* Found a block that has a slab list, search it */ 1827 while (*iter) { 1828 char *end, *glob; 1829 size_t cmplen; 1830 1831 end = strchrnul(iter, ','); 1832 if (next_block && next_block < end) 1833 end = next_block - 1; 1834 1835 glob = strnchr(iter, end - iter, '*'); 1836 if (glob) 1837 cmplen = glob - iter; 1838 else 1839 cmplen = max_t(size_t, len, (end - iter)); 1840 1841 if (!strncmp(name, iter, cmplen)) { 1842 flags |= block_flags; 1843 return flags; 1844 } 1845 1846 if (!*end || *end == ';') 1847 break; 1848 iter = end + 1; 1849 } 1850 } 1851 1852 return flags | slub_debug_local; 1853 } 1854 #else /* !CONFIG_SLUB_DEBUG */ 1855 static inline void setup_object_debug(struct kmem_cache *s, void *object) {} 1856 static inline 1857 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} 1858 1859 static inline bool alloc_debug_processing(struct kmem_cache *s, 1860 struct slab *slab, void *object, int orig_size) { return true; } 1861 1862 static inline bool free_debug_processing(struct kmem_cache *s, 1863 struct slab *slab, void *head, void *tail, int *bulk_cnt, 1864 unsigned long addr, depot_stack_handle_t handle) { return true; } 1865 1866 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} 1867 static inline int check_object(struct kmem_cache *s, struct slab *slab, 1868 void *object, u8 val) { return 1; } 1869 static inline depot_stack_handle_t set_track_prepare(void) { return 0; } 1870 static inline void set_track(struct kmem_cache *s, void *object, 1871 enum track_item alloc, unsigned long addr) {} 1872 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1873 struct slab *slab) {} 1874 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 1875 struct slab *slab) {} 1876 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) 1877 { 1878 return flags; 1879 } 1880 #define slub_debug 0 1881 1882 #define disable_higher_order_debug 0 1883 1884 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1885 { return 0; } 1886 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1887 int objects) {} 1888 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1889 int objects) {} 1890 #ifndef CONFIG_SLUB_TINY 1891 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 1892 void **freelist, void *nextfree) 1893 { 1894 return false; 1895 } 1896 #endif 1897 #endif /* CONFIG_SLUB_DEBUG */ 1898 1899 #ifdef CONFIG_SLAB_OBJ_EXT 1900 1901 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG 1902 1903 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) 1904 { 1905 struct slabobj_ext *slab_exts; 1906 struct slab *obj_exts_slab; 1907 1908 obj_exts_slab = virt_to_slab(obj_exts); 1909 slab_exts = slab_obj_exts(obj_exts_slab); 1910 if (slab_exts) { 1911 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache, 1912 obj_exts_slab, obj_exts); 1913 /* codetag should be NULL */ 1914 WARN_ON(slab_exts[offs].ref.ct); 1915 set_codetag_empty(&slab_exts[offs].ref); 1916 } 1917 } 1918 1919 static inline void mark_failed_objexts_alloc(struct slab *slab) 1920 { 1921 slab->obj_exts = OBJEXTS_ALLOC_FAIL; 1922 } 1923 1924 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 1925 struct slabobj_ext *vec, unsigned int objects) 1926 { 1927 /* 1928 * If vector previously failed to allocate then we have live 1929 * objects with no tag reference. Mark all references in this 1930 * vector as empty to avoid warnings later on. 1931 */ 1932 if (obj_exts & OBJEXTS_ALLOC_FAIL) { 1933 unsigned int i; 1934 1935 for (i = 0; i < objects; i++) 1936 set_codetag_empty(&vec[i].ref); 1937 } 1938 } 1939 1940 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 1941 1942 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} 1943 static inline void mark_failed_objexts_alloc(struct slab *slab) {} 1944 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 1945 struct slabobj_ext *vec, unsigned int objects) {} 1946 1947 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 1948 1949 /* 1950 * The allocated objcg pointers array is not accounted directly. 1951 * Moreover, it should not come from DMA buffer and is not readily 1952 * reclaimable. So those GFP bits should be masked off. 1953 */ 1954 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ 1955 __GFP_ACCOUNT | __GFP_NOFAIL) 1956 1957 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 1958 gfp_t gfp, bool new_slab) 1959 { 1960 unsigned int objects = objs_per_slab(s, slab); 1961 unsigned long new_exts; 1962 unsigned long old_exts; 1963 struct slabobj_ext *vec; 1964 1965 gfp &= ~OBJCGS_CLEAR_MASK; 1966 /* Prevent recursive extension vector allocation */ 1967 gfp |= __GFP_NO_OBJ_EXT; 1968 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, 1969 slab_nid(slab)); 1970 if (!vec) { 1971 /* Mark vectors which failed to allocate */ 1972 if (new_slab) 1973 mark_failed_objexts_alloc(slab); 1974 1975 return -ENOMEM; 1976 } 1977 1978 new_exts = (unsigned long)vec; 1979 #ifdef CONFIG_MEMCG 1980 new_exts |= MEMCG_DATA_OBJEXTS; 1981 #endif 1982 old_exts = READ_ONCE(slab->obj_exts); 1983 handle_failed_objexts_alloc(old_exts, vec, objects); 1984 if (new_slab) { 1985 /* 1986 * If the slab is brand new and nobody can yet access its 1987 * obj_exts, no synchronization is required and obj_exts can 1988 * be simply assigned. 1989 */ 1990 slab->obj_exts = new_exts; 1991 } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) || 1992 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { 1993 /* 1994 * If the slab is already in use, somebody can allocate and 1995 * assign slabobj_exts in parallel. In this case the existing 1996 * objcg vector should be reused. 1997 */ 1998 mark_objexts_empty(vec); 1999 kfree(vec); 2000 return 0; 2001 } 2002 2003 kmemleak_not_leak(vec); 2004 return 0; 2005 } 2006 2007 static inline void free_slab_obj_exts(struct slab *slab) 2008 { 2009 struct slabobj_ext *obj_exts; 2010 2011 obj_exts = slab_obj_exts(slab); 2012 if (!obj_exts) 2013 return; 2014 2015 /* 2016 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its 2017 * corresponding extension will be NULL. alloc_tag_sub() will throw a 2018 * warning if slab has extensions but the extension of an object is 2019 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that 2020 * the extension for obj_exts is expected to be NULL. 2021 */ 2022 mark_objexts_empty(obj_exts); 2023 kfree(obj_exts); 2024 slab->obj_exts = 0; 2025 } 2026 2027 static inline bool need_slab_obj_ext(void) 2028 { 2029 if (mem_alloc_profiling_enabled()) 2030 return true; 2031 2032 /* 2033 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally 2034 * inside memcg_slab_post_alloc_hook. No other users for now. 2035 */ 2036 return false; 2037 } 2038 2039 #else /* CONFIG_SLAB_OBJ_EXT */ 2040 2041 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 2042 gfp_t gfp, bool new_slab) 2043 { 2044 return 0; 2045 } 2046 2047 static inline void free_slab_obj_exts(struct slab *slab) 2048 { 2049 } 2050 2051 static inline bool need_slab_obj_ext(void) 2052 { 2053 return false; 2054 } 2055 2056 #endif /* CONFIG_SLAB_OBJ_EXT */ 2057 2058 #ifdef CONFIG_MEM_ALLOC_PROFILING 2059 2060 static inline struct slabobj_ext * 2061 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 2062 { 2063 struct slab *slab; 2064 2065 if (!p) 2066 return NULL; 2067 2068 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2069 return NULL; 2070 2071 if (flags & __GFP_NO_OBJ_EXT) 2072 return NULL; 2073 2074 slab = virt_to_slab(p); 2075 if (!slab_obj_exts(slab) && 2076 WARN(alloc_slab_obj_exts(slab, s, flags, false), 2077 "%s, %s: Failed to create slab extension vector!\n", 2078 __func__, s->name)) 2079 return NULL; 2080 2081 return slab_obj_exts(slab) + obj_to_index(s, slab, p); 2082 } 2083 2084 static inline void 2085 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2086 { 2087 if (need_slab_obj_ext()) { 2088 struct slabobj_ext *obj_exts; 2089 2090 obj_exts = prepare_slab_obj_exts_hook(s, flags, object); 2091 /* 2092 * Currently obj_exts is used only for allocation profiling. 2093 * If other users appear then mem_alloc_profiling_enabled() 2094 * check should be added before alloc_tag_add(). 2095 */ 2096 if (likely(obj_exts)) 2097 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); 2098 } 2099 } 2100 2101 static inline void 2102 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2103 int objects) 2104 { 2105 struct slabobj_ext *obj_exts; 2106 int i; 2107 2108 if (!mem_alloc_profiling_enabled()) 2109 return; 2110 2111 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */ 2112 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2113 return; 2114 2115 obj_exts = slab_obj_exts(slab); 2116 if (!obj_exts) 2117 return; 2118 2119 for (i = 0; i < objects; i++) { 2120 unsigned int off = obj_to_index(s, slab, p[i]); 2121 2122 alloc_tag_sub(&obj_exts[off].ref, s->size); 2123 } 2124 } 2125 2126 #else /* CONFIG_MEM_ALLOC_PROFILING */ 2127 2128 static inline void 2129 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2130 { 2131 } 2132 2133 static inline void 2134 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2135 int objects) 2136 { 2137 } 2138 2139 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 2140 2141 2142 #ifdef CONFIG_MEMCG 2143 2144 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object); 2145 2146 static __fastpath_inline 2147 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 2148 gfp_t flags, size_t size, void **p) 2149 { 2150 if (likely(!memcg_kmem_online())) 2151 return true; 2152 2153 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) 2154 return true; 2155 2156 if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p))) 2157 return true; 2158 2159 if (likely(size == 1)) { 2160 memcg_alloc_abort_single(s, *p); 2161 *p = NULL; 2162 } else { 2163 kmem_cache_free_bulk(s, size, p); 2164 } 2165 2166 return false; 2167 } 2168 2169 static __fastpath_inline 2170 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2171 int objects) 2172 { 2173 struct slabobj_ext *obj_exts; 2174 2175 if (!memcg_kmem_online()) 2176 return; 2177 2178 obj_exts = slab_obj_exts(slab); 2179 if (likely(!obj_exts)) 2180 return; 2181 2182 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); 2183 } 2184 2185 static __fastpath_inline 2186 bool memcg_slab_post_charge(void *p, gfp_t flags) 2187 { 2188 struct slabobj_ext *slab_exts; 2189 struct kmem_cache *s; 2190 struct folio *folio; 2191 struct slab *slab; 2192 unsigned long off; 2193 2194 folio = virt_to_folio(p); 2195 if (!folio_test_slab(folio)) { 2196 return folio_memcg_kmem(folio) || 2197 (__memcg_kmem_charge_page(folio_page(folio, 0), flags, 2198 folio_order(folio)) == 0); 2199 } 2200 2201 slab = folio_slab(folio); 2202 s = slab->slab_cache; 2203 2204 /* 2205 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency 2206 * of slab_obj_exts being allocated from the same slab and thus the slab 2207 * becoming effectively unfreeable. 2208 */ 2209 if (is_kmalloc_normal(s)) 2210 return true; 2211 2212 /* Ignore already charged objects. */ 2213 slab_exts = slab_obj_exts(slab); 2214 if (slab_exts) { 2215 off = obj_to_index(s, slab, p); 2216 if (unlikely(slab_exts[off].objcg)) 2217 return true; 2218 } 2219 2220 return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p); 2221 } 2222 2223 #else /* CONFIG_MEMCG */ 2224 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s, 2225 struct list_lru *lru, 2226 gfp_t flags, size_t size, 2227 void **p) 2228 { 2229 return true; 2230 } 2231 2232 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 2233 void **p, int objects) 2234 { 2235 } 2236 2237 static inline bool memcg_slab_post_charge(void *p, gfp_t flags) 2238 { 2239 return true; 2240 } 2241 #endif /* CONFIG_MEMCG */ 2242 2243 #ifdef CONFIG_SLUB_RCU_DEBUG 2244 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head); 2245 2246 struct rcu_delayed_free { 2247 struct rcu_head head; 2248 void *object; 2249 }; 2250 #endif 2251 2252 /* 2253 * Hooks for other subsystems that check memory allocations. In a typical 2254 * production configuration these hooks all should produce no code at all. 2255 * 2256 * Returns true if freeing of the object can proceed, false if its reuse 2257 * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned 2258 * to KFENCE. 2259 */ 2260 static __always_inline 2261 bool slab_free_hook(struct kmem_cache *s, void *x, bool init, 2262 bool after_rcu_delay) 2263 { 2264 /* Are the object contents still accessible? */ 2265 bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay; 2266 2267 kmemleak_free_recursive(x, s->flags); 2268 kmsan_slab_free(s, x); 2269 2270 debug_check_no_locks_freed(x, s->object_size); 2271 2272 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 2273 debug_check_no_obj_freed(x, s->object_size); 2274 2275 /* Use KCSAN to help debug racy use-after-free. */ 2276 if (!still_accessible) 2277 __kcsan_check_access(x, s->object_size, 2278 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 2279 2280 if (kfence_free(x)) 2281 return false; 2282 2283 /* 2284 * Give KASAN a chance to notice an invalid free operation before we 2285 * modify the object. 2286 */ 2287 if (kasan_slab_pre_free(s, x)) 2288 return false; 2289 2290 #ifdef CONFIG_SLUB_RCU_DEBUG 2291 if (still_accessible) { 2292 struct rcu_delayed_free *delayed_free; 2293 2294 delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT); 2295 if (delayed_free) { 2296 /* 2297 * Let KASAN track our call stack as a "related work 2298 * creation", just like if the object had been freed 2299 * normally via kfree_rcu(). 2300 * We have to do this manually because the rcu_head is 2301 * not located inside the object. 2302 */ 2303 kasan_record_aux_stack_noalloc(x); 2304 2305 delayed_free->object = x; 2306 call_rcu(&delayed_free->head, slab_free_after_rcu_debug); 2307 return false; 2308 } 2309 } 2310 #endif /* CONFIG_SLUB_RCU_DEBUG */ 2311 2312 /* 2313 * As memory initialization might be integrated into KASAN, 2314 * kasan_slab_free and initialization memset's must be 2315 * kept together to avoid discrepancies in behavior. 2316 * 2317 * The initialization memset's clear the object and the metadata, 2318 * but don't touch the SLAB redzone. 2319 * 2320 * The object's freepointer is also avoided if stored outside the 2321 * object. 2322 */ 2323 if (unlikely(init)) { 2324 int rsize; 2325 unsigned int inuse, orig_size; 2326 2327 inuse = get_info_end(s); 2328 orig_size = get_orig_size(s, x); 2329 if (!kasan_has_integrated_init()) 2330 memset(kasan_reset_tag(x), 0, orig_size); 2331 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 2332 memset((char *)kasan_reset_tag(x) + inuse, 0, 2333 s->size - inuse - rsize); 2334 /* 2335 * Restore orig_size, otherwize kmalloc redzone overwritten 2336 * would be reported 2337 */ 2338 set_orig_size(s, x, orig_size); 2339 2340 } 2341 /* KASAN might put x into memory quarantine, delaying its reuse. */ 2342 return !kasan_slab_free(s, x, init, still_accessible); 2343 } 2344 2345 static __fastpath_inline 2346 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail, 2347 int *cnt) 2348 { 2349 2350 void *object; 2351 void *next = *head; 2352 void *old_tail = *tail; 2353 bool init; 2354 2355 if (is_kfence_address(next)) { 2356 slab_free_hook(s, next, false, false); 2357 return false; 2358 } 2359 2360 /* Head and tail of the reconstructed freelist */ 2361 *head = NULL; 2362 *tail = NULL; 2363 2364 init = slab_want_init_on_free(s); 2365 2366 do { 2367 object = next; 2368 next = get_freepointer(s, object); 2369 2370 /* If object's reuse doesn't have to be delayed */ 2371 if (likely(slab_free_hook(s, object, init, false))) { 2372 /* Move object to the new freelist */ 2373 set_freepointer(s, object, *head); 2374 *head = object; 2375 if (!*tail) 2376 *tail = object; 2377 } else { 2378 /* 2379 * Adjust the reconstructed freelist depth 2380 * accordingly if object's reuse is delayed. 2381 */ 2382 --(*cnt); 2383 } 2384 } while (object != old_tail); 2385 2386 return *head != NULL; 2387 } 2388 2389 static void *setup_object(struct kmem_cache *s, void *object) 2390 { 2391 setup_object_debug(s, object); 2392 object = kasan_init_slab_obj(s, object); 2393 if (unlikely(s->ctor)) { 2394 kasan_unpoison_new_object(s, object); 2395 s->ctor(object); 2396 kasan_poison_new_object(s, object); 2397 } 2398 return object; 2399 } 2400 2401 /* 2402 * Slab allocation and freeing 2403 */ 2404 static inline struct slab *alloc_slab_page(gfp_t flags, int node, 2405 struct kmem_cache_order_objects oo) 2406 { 2407 struct folio *folio; 2408 struct slab *slab; 2409 unsigned int order = oo_order(oo); 2410 2411 if (node == NUMA_NO_NODE) 2412 folio = (struct folio *)alloc_pages(flags, order); 2413 else 2414 folio = (struct folio *)__alloc_pages_node(node, flags, order); 2415 2416 if (!folio) 2417 return NULL; 2418 2419 slab = folio_slab(folio); 2420 __folio_set_slab(folio); 2421 /* Make the flag visible before any changes to folio->mapping */ 2422 smp_wmb(); 2423 if (folio_is_pfmemalloc(folio)) 2424 slab_set_pfmemalloc(slab); 2425 2426 return slab; 2427 } 2428 2429 #ifdef CONFIG_SLAB_FREELIST_RANDOM 2430 /* Pre-initialize the random sequence cache */ 2431 static int init_cache_random_seq(struct kmem_cache *s) 2432 { 2433 unsigned int count = oo_objects(s->oo); 2434 int err; 2435 2436 /* Bailout if already initialised */ 2437 if (s->random_seq) 2438 return 0; 2439 2440 err = cache_random_seq_create(s, count, GFP_KERNEL); 2441 if (err) { 2442 pr_err("SLUB: Unable to initialize free list for %s\n", 2443 s->name); 2444 return err; 2445 } 2446 2447 /* Transform to an offset on the set of pages */ 2448 if (s->random_seq) { 2449 unsigned int i; 2450 2451 for (i = 0; i < count; i++) 2452 s->random_seq[i] *= s->size; 2453 } 2454 return 0; 2455 } 2456 2457 /* Initialize each random sequence freelist per cache */ 2458 static void __init init_freelist_randomization(void) 2459 { 2460 struct kmem_cache *s; 2461 2462 mutex_lock(&slab_mutex); 2463 2464 list_for_each_entry(s, &slab_caches, list) 2465 init_cache_random_seq(s); 2466 2467 mutex_unlock(&slab_mutex); 2468 } 2469 2470 /* Get the next entry on the pre-computed freelist randomized */ 2471 static void *next_freelist_entry(struct kmem_cache *s, 2472 unsigned long *pos, void *start, 2473 unsigned long page_limit, 2474 unsigned long freelist_count) 2475 { 2476 unsigned int idx; 2477 2478 /* 2479 * If the target page allocation failed, the number of objects on the 2480 * page might be smaller than the usual size defined by the cache. 2481 */ 2482 do { 2483 idx = s->random_seq[*pos]; 2484 *pos += 1; 2485 if (*pos >= freelist_count) 2486 *pos = 0; 2487 } while (unlikely(idx >= page_limit)); 2488 2489 return (char *)start + idx; 2490 } 2491 2492 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 2493 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2494 { 2495 void *start; 2496 void *cur; 2497 void *next; 2498 unsigned long idx, pos, page_limit, freelist_count; 2499 2500 if (slab->objects < 2 || !s->random_seq) 2501 return false; 2502 2503 freelist_count = oo_objects(s->oo); 2504 pos = get_random_u32_below(freelist_count); 2505 2506 page_limit = slab->objects * s->size; 2507 start = fixup_red_left(s, slab_address(slab)); 2508 2509 /* First entry is used as the base of the freelist */ 2510 cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count); 2511 cur = setup_object(s, cur); 2512 slab->freelist = cur; 2513 2514 for (idx = 1; idx < slab->objects; idx++) { 2515 next = next_freelist_entry(s, &pos, start, page_limit, 2516 freelist_count); 2517 next = setup_object(s, next); 2518 set_freepointer(s, cur, next); 2519 cur = next; 2520 } 2521 set_freepointer(s, cur, NULL); 2522 2523 return true; 2524 } 2525 #else 2526 static inline int init_cache_random_seq(struct kmem_cache *s) 2527 { 2528 return 0; 2529 } 2530 static inline void init_freelist_randomization(void) { } 2531 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2532 { 2533 return false; 2534 } 2535 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 2536 2537 static __always_inline void account_slab(struct slab *slab, int order, 2538 struct kmem_cache *s, gfp_t gfp) 2539 { 2540 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) 2541 alloc_slab_obj_exts(slab, s, gfp, true); 2542 2543 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2544 PAGE_SIZE << order); 2545 } 2546 2547 static __always_inline void unaccount_slab(struct slab *slab, int order, 2548 struct kmem_cache *s) 2549 { 2550 if (memcg_kmem_online() || need_slab_obj_ext()) 2551 free_slab_obj_exts(slab); 2552 2553 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2554 -(PAGE_SIZE << order)); 2555 } 2556 2557 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 2558 { 2559 struct slab *slab; 2560 struct kmem_cache_order_objects oo = s->oo; 2561 gfp_t alloc_gfp; 2562 void *start, *p, *next; 2563 int idx; 2564 bool shuffle; 2565 2566 flags &= gfp_allowed_mask; 2567 2568 flags |= s->allocflags; 2569 2570 /* 2571 * Let the initial higher-order allocation fail under memory pressure 2572 * so we fall-back to the minimum order allocation. 2573 */ 2574 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 2575 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 2576 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; 2577 2578 slab = alloc_slab_page(alloc_gfp, node, oo); 2579 if (unlikely(!slab)) { 2580 oo = s->min; 2581 alloc_gfp = flags; 2582 /* 2583 * Allocation may have failed due to fragmentation. 2584 * Try a lower order alloc if possible 2585 */ 2586 slab = alloc_slab_page(alloc_gfp, node, oo); 2587 if (unlikely(!slab)) 2588 return NULL; 2589 stat(s, ORDER_FALLBACK); 2590 } 2591 2592 slab->objects = oo_objects(oo); 2593 slab->inuse = 0; 2594 slab->frozen = 0; 2595 2596 account_slab(slab, oo_order(oo), s, flags); 2597 2598 slab->slab_cache = s; 2599 2600 kasan_poison_slab(slab); 2601 2602 start = slab_address(slab); 2603 2604 setup_slab_debug(s, slab, start); 2605 2606 shuffle = shuffle_freelist(s, slab); 2607 2608 if (!shuffle) { 2609 start = fixup_red_left(s, start); 2610 start = setup_object(s, start); 2611 slab->freelist = start; 2612 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 2613 next = p + s->size; 2614 next = setup_object(s, next); 2615 set_freepointer(s, p, next); 2616 p = next; 2617 } 2618 set_freepointer(s, p, NULL); 2619 } 2620 2621 return slab; 2622 } 2623 2624 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 2625 { 2626 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2627 flags = kmalloc_fix_flags(flags); 2628 2629 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2630 2631 return allocate_slab(s, 2632 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 2633 } 2634 2635 static void __free_slab(struct kmem_cache *s, struct slab *slab) 2636 { 2637 struct folio *folio = slab_folio(slab); 2638 int order = folio_order(folio); 2639 int pages = 1 << order; 2640 2641 __slab_clear_pfmemalloc(slab); 2642 folio->mapping = NULL; 2643 /* Make the mapping reset visible before clearing the flag */ 2644 smp_wmb(); 2645 __folio_clear_slab(folio); 2646 mm_account_reclaimed_pages(pages); 2647 unaccount_slab(slab, order, s); 2648 __free_pages(&folio->page, order); 2649 } 2650 2651 static void rcu_free_slab(struct rcu_head *h) 2652 { 2653 struct slab *slab = container_of(h, struct slab, rcu_head); 2654 2655 __free_slab(slab->slab_cache, slab); 2656 } 2657 2658 static void free_slab(struct kmem_cache *s, struct slab *slab) 2659 { 2660 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 2661 void *p; 2662 2663 slab_pad_check(s, slab); 2664 for_each_object(p, s, slab_address(slab), slab->objects) 2665 check_object(s, slab, p, SLUB_RED_INACTIVE); 2666 } 2667 2668 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) 2669 call_rcu(&slab->rcu_head, rcu_free_slab); 2670 else 2671 __free_slab(s, slab); 2672 } 2673 2674 static void discard_slab(struct kmem_cache *s, struct slab *slab) 2675 { 2676 dec_slabs_node(s, slab_nid(slab), slab->objects); 2677 free_slab(s, slab); 2678 } 2679 2680 /* 2681 * SLUB reuses PG_workingset bit to keep track of whether it's on 2682 * the per-node partial list. 2683 */ 2684 static inline bool slab_test_node_partial(const struct slab *slab) 2685 { 2686 return folio_test_workingset(slab_folio(slab)); 2687 } 2688 2689 static inline void slab_set_node_partial(struct slab *slab) 2690 { 2691 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2692 } 2693 2694 static inline void slab_clear_node_partial(struct slab *slab) 2695 { 2696 clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2697 } 2698 2699 /* 2700 * Management of partially allocated slabs. 2701 */ 2702 static inline void 2703 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 2704 { 2705 n->nr_partial++; 2706 if (tail == DEACTIVATE_TO_TAIL) 2707 list_add_tail(&slab->slab_list, &n->partial); 2708 else 2709 list_add(&slab->slab_list, &n->partial); 2710 slab_set_node_partial(slab); 2711 } 2712 2713 static inline void add_partial(struct kmem_cache_node *n, 2714 struct slab *slab, int tail) 2715 { 2716 lockdep_assert_held(&n->list_lock); 2717 __add_partial(n, slab, tail); 2718 } 2719 2720 static inline void remove_partial(struct kmem_cache_node *n, 2721 struct slab *slab) 2722 { 2723 lockdep_assert_held(&n->list_lock); 2724 list_del(&slab->slab_list); 2725 slab_clear_node_partial(slab); 2726 n->nr_partial--; 2727 } 2728 2729 /* 2730 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a 2731 * slab from the n->partial list. Remove only a single object from the slab, do 2732 * the alloc_debug_processing() checks and leave the slab on the list, or move 2733 * it to full list if it was the last free object. 2734 */ 2735 static void *alloc_single_from_partial(struct kmem_cache *s, 2736 struct kmem_cache_node *n, struct slab *slab, int orig_size) 2737 { 2738 void *object; 2739 2740 lockdep_assert_held(&n->list_lock); 2741 2742 object = slab->freelist; 2743 slab->freelist = get_freepointer(s, object); 2744 slab->inuse++; 2745 2746 if (!alloc_debug_processing(s, slab, object, orig_size)) { 2747 remove_partial(n, slab); 2748 return NULL; 2749 } 2750 2751 if (slab->inuse == slab->objects) { 2752 remove_partial(n, slab); 2753 add_full(s, n, slab); 2754 } 2755 2756 return object; 2757 } 2758 2759 /* 2760 * Called only for kmem_cache_debug() caches to allocate from a freshly 2761 * allocated slab. Allocate a single object instead of whole freelist 2762 * and put the slab to the partial (or full) list. 2763 */ 2764 static void *alloc_single_from_new_slab(struct kmem_cache *s, 2765 struct slab *slab, int orig_size) 2766 { 2767 int nid = slab_nid(slab); 2768 struct kmem_cache_node *n = get_node(s, nid); 2769 unsigned long flags; 2770 void *object; 2771 2772 2773 object = slab->freelist; 2774 slab->freelist = get_freepointer(s, object); 2775 slab->inuse = 1; 2776 2777 if (!alloc_debug_processing(s, slab, object, orig_size)) 2778 /* 2779 * It's not really expected that this would fail on a 2780 * freshly allocated slab, but a concurrent memory 2781 * corruption in theory could cause that. 2782 */ 2783 return NULL; 2784 2785 spin_lock_irqsave(&n->list_lock, flags); 2786 2787 if (slab->inuse == slab->objects) 2788 add_full(s, n, slab); 2789 else 2790 add_partial(n, slab, DEACTIVATE_TO_HEAD); 2791 2792 inc_slabs_node(s, nid, slab->objects); 2793 spin_unlock_irqrestore(&n->list_lock, flags); 2794 2795 return object; 2796 } 2797 2798 #ifdef CONFIG_SLUB_CPU_PARTIAL 2799 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 2800 #else 2801 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 2802 int drain) { } 2803 #endif 2804 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 2805 2806 /* 2807 * Try to allocate a partial slab from a specific node. 2808 */ 2809 static struct slab *get_partial_node(struct kmem_cache *s, 2810 struct kmem_cache_node *n, 2811 struct partial_context *pc) 2812 { 2813 struct slab *slab, *slab2, *partial = NULL; 2814 unsigned long flags; 2815 unsigned int partial_slabs = 0; 2816 2817 /* 2818 * Racy check. If we mistakenly see no partial slabs then we 2819 * just allocate an empty slab. If we mistakenly try to get a 2820 * partial slab and there is none available then get_partial() 2821 * will return NULL. 2822 */ 2823 if (!n || !n->nr_partial) 2824 return NULL; 2825 2826 spin_lock_irqsave(&n->list_lock, flags); 2827 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 2828 if (!pfmemalloc_match(slab, pc->flags)) 2829 continue; 2830 2831 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 2832 void *object = alloc_single_from_partial(s, n, slab, 2833 pc->orig_size); 2834 if (object) { 2835 partial = slab; 2836 pc->object = object; 2837 break; 2838 } 2839 continue; 2840 } 2841 2842 remove_partial(n, slab); 2843 2844 if (!partial) { 2845 partial = slab; 2846 stat(s, ALLOC_FROM_PARTIAL); 2847 2848 if ((slub_get_cpu_partial(s) == 0)) { 2849 break; 2850 } 2851 } else { 2852 put_cpu_partial(s, slab, 0); 2853 stat(s, CPU_PARTIAL_NODE); 2854 2855 if (++partial_slabs > slub_get_cpu_partial(s) / 2) { 2856 break; 2857 } 2858 } 2859 } 2860 spin_unlock_irqrestore(&n->list_lock, flags); 2861 return partial; 2862 } 2863 2864 /* 2865 * Get a slab from somewhere. Search in increasing NUMA distances. 2866 */ 2867 static struct slab *get_any_partial(struct kmem_cache *s, 2868 struct partial_context *pc) 2869 { 2870 #ifdef CONFIG_NUMA 2871 struct zonelist *zonelist; 2872 struct zoneref *z; 2873 struct zone *zone; 2874 enum zone_type highest_zoneidx = gfp_zone(pc->flags); 2875 struct slab *slab; 2876 unsigned int cpuset_mems_cookie; 2877 2878 /* 2879 * The defrag ratio allows a configuration of the tradeoffs between 2880 * inter node defragmentation and node local allocations. A lower 2881 * defrag_ratio increases the tendency to do local allocations 2882 * instead of attempting to obtain partial slabs from other nodes. 2883 * 2884 * If the defrag_ratio is set to 0 then kmalloc() always 2885 * returns node local objects. If the ratio is higher then kmalloc() 2886 * may return off node objects because partial slabs are obtained 2887 * from other nodes and filled up. 2888 * 2889 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2890 * (which makes defrag_ratio = 1000) then every (well almost) 2891 * allocation will first attempt to defrag slab caches on other nodes. 2892 * This means scanning over all nodes to look for partial slabs which 2893 * may be expensive if we do it every time we are trying to find a slab 2894 * with available objects. 2895 */ 2896 if (!s->remote_node_defrag_ratio || 2897 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2898 return NULL; 2899 2900 do { 2901 cpuset_mems_cookie = read_mems_allowed_begin(); 2902 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); 2903 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2904 struct kmem_cache_node *n; 2905 2906 n = get_node(s, zone_to_nid(zone)); 2907 2908 if (n && cpuset_zone_allowed(zone, pc->flags) && 2909 n->nr_partial > s->min_partial) { 2910 slab = get_partial_node(s, n, pc); 2911 if (slab) { 2912 /* 2913 * Don't check read_mems_allowed_retry() 2914 * here - if mems_allowed was updated in 2915 * parallel, that was a harmless race 2916 * between allocation and the cpuset 2917 * update 2918 */ 2919 return slab; 2920 } 2921 } 2922 } 2923 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2924 #endif /* CONFIG_NUMA */ 2925 return NULL; 2926 } 2927 2928 /* 2929 * Get a partial slab, lock it and return it. 2930 */ 2931 static struct slab *get_partial(struct kmem_cache *s, int node, 2932 struct partial_context *pc) 2933 { 2934 struct slab *slab; 2935 int searchnode = node; 2936 2937 if (node == NUMA_NO_NODE) 2938 searchnode = numa_mem_id(); 2939 2940 slab = get_partial_node(s, get_node(s, searchnode), pc); 2941 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE))) 2942 return slab; 2943 2944 return get_any_partial(s, pc); 2945 } 2946 2947 #ifndef CONFIG_SLUB_TINY 2948 2949 #ifdef CONFIG_PREEMPTION 2950 /* 2951 * Calculate the next globally unique transaction for disambiguation 2952 * during cmpxchg. The transactions start with the cpu number and are then 2953 * incremented by CONFIG_NR_CPUS. 2954 */ 2955 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2956 #else 2957 /* 2958 * No preemption supported therefore also no need to check for 2959 * different cpus. 2960 */ 2961 #define TID_STEP 1 2962 #endif /* CONFIG_PREEMPTION */ 2963 2964 static inline unsigned long next_tid(unsigned long tid) 2965 { 2966 return tid + TID_STEP; 2967 } 2968 2969 #ifdef SLUB_DEBUG_CMPXCHG 2970 static inline unsigned int tid_to_cpu(unsigned long tid) 2971 { 2972 return tid % TID_STEP; 2973 } 2974 2975 static inline unsigned long tid_to_event(unsigned long tid) 2976 { 2977 return tid / TID_STEP; 2978 } 2979 #endif 2980 2981 static inline unsigned int init_tid(int cpu) 2982 { 2983 return cpu; 2984 } 2985 2986 static inline void note_cmpxchg_failure(const char *n, 2987 const struct kmem_cache *s, unsigned long tid) 2988 { 2989 #ifdef SLUB_DEBUG_CMPXCHG 2990 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2991 2992 pr_info("%s %s: cmpxchg redo ", n, s->name); 2993 2994 #ifdef CONFIG_PREEMPTION 2995 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2996 pr_warn("due to cpu change %d -> %d\n", 2997 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2998 else 2999 #endif 3000 if (tid_to_event(tid) != tid_to_event(actual_tid)) 3001 pr_warn("due to cpu running other code. Event %ld->%ld\n", 3002 tid_to_event(tid), tid_to_event(actual_tid)); 3003 else 3004 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 3005 actual_tid, tid, next_tid(tid)); 3006 #endif 3007 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 3008 } 3009 3010 static void init_kmem_cache_cpus(struct kmem_cache *s) 3011 { 3012 int cpu; 3013 struct kmem_cache_cpu *c; 3014 3015 for_each_possible_cpu(cpu) { 3016 c = per_cpu_ptr(s->cpu_slab, cpu); 3017 local_lock_init(&c->lock); 3018 c->tid = init_tid(cpu); 3019 } 3020 } 3021 3022 /* 3023 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist, 3024 * unfreezes the slabs and puts it on the proper list. 3025 * Assumes the slab has been already safely taken away from kmem_cache_cpu 3026 * by the caller. 3027 */ 3028 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, 3029 void *freelist) 3030 { 3031 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 3032 int free_delta = 0; 3033 void *nextfree, *freelist_iter, *freelist_tail; 3034 int tail = DEACTIVATE_TO_HEAD; 3035 unsigned long flags = 0; 3036 struct slab new; 3037 struct slab old; 3038 3039 if (READ_ONCE(slab->freelist)) { 3040 stat(s, DEACTIVATE_REMOTE_FREES); 3041 tail = DEACTIVATE_TO_TAIL; 3042 } 3043 3044 /* 3045 * Stage one: Count the objects on cpu's freelist as free_delta and 3046 * remember the last object in freelist_tail for later splicing. 3047 */ 3048 freelist_tail = NULL; 3049 freelist_iter = freelist; 3050 while (freelist_iter) { 3051 nextfree = get_freepointer(s, freelist_iter); 3052 3053 /* 3054 * If 'nextfree' is invalid, it is possible that the object at 3055 * 'freelist_iter' is already corrupted. So isolate all objects 3056 * starting at 'freelist_iter' by skipping them. 3057 */ 3058 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) 3059 break; 3060 3061 freelist_tail = freelist_iter; 3062 free_delta++; 3063 3064 freelist_iter = nextfree; 3065 } 3066 3067 /* 3068 * Stage two: Unfreeze the slab while splicing the per-cpu 3069 * freelist to the head of slab's freelist. 3070 */ 3071 do { 3072 old.freelist = READ_ONCE(slab->freelist); 3073 old.counters = READ_ONCE(slab->counters); 3074 VM_BUG_ON(!old.frozen); 3075 3076 /* Determine target state of the slab */ 3077 new.counters = old.counters; 3078 new.frozen = 0; 3079 if (freelist_tail) { 3080 new.inuse -= free_delta; 3081 set_freepointer(s, freelist_tail, old.freelist); 3082 new.freelist = freelist; 3083 } else { 3084 new.freelist = old.freelist; 3085 } 3086 } while (!slab_update_freelist(s, slab, 3087 old.freelist, old.counters, 3088 new.freelist, new.counters, 3089 "unfreezing slab")); 3090 3091 /* 3092 * Stage three: Manipulate the slab list based on the updated state. 3093 */ 3094 if (!new.inuse && n->nr_partial >= s->min_partial) { 3095 stat(s, DEACTIVATE_EMPTY); 3096 discard_slab(s, slab); 3097 stat(s, FREE_SLAB); 3098 } else if (new.freelist) { 3099 spin_lock_irqsave(&n->list_lock, flags); 3100 add_partial(n, slab, tail); 3101 spin_unlock_irqrestore(&n->list_lock, flags); 3102 stat(s, tail); 3103 } else { 3104 stat(s, DEACTIVATE_FULL); 3105 } 3106 } 3107 3108 #ifdef CONFIG_SLUB_CPU_PARTIAL 3109 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) 3110 { 3111 struct kmem_cache_node *n = NULL, *n2 = NULL; 3112 struct slab *slab, *slab_to_discard = NULL; 3113 unsigned long flags = 0; 3114 3115 while (partial_slab) { 3116 slab = partial_slab; 3117 partial_slab = slab->next; 3118 3119 n2 = get_node(s, slab_nid(slab)); 3120 if (n != n2) { 3121 if (n) 3122 spin_unlock_irqrestore(&n->list_lock, flags); 3123 3124 n = n2; 3125 spin_lock_irqsave(&n->list_lock, flags); 3126 } 3127 3128 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { 3129 slab->next = slab_to_discard; 3130 slab_to_discard = slab; 3131 } else { 3132 add_partial(n, slab, DEACTIVATE_TO_TAIL); 3133 stat(s, FREE_ADD_PARTIAL); 3134 } 3135 } 3136 3137 if (n) 3138 spin_unlock_irqrestore(&n->list_lock, flags); 3139 3140 while (slab_to_discard) { 3141 slab = slab_to_discard; 3142 slab_to_discard = slab_to_discard->next; 3143 3144 stat(s, DEACTIVATE_EMPTY); 3145 discard_slab(s, slab); 3146 stat(s, FREE_SLAB); 3147 } 3148 } 3149 3150 /* 3151 * Put all the cpu partial slabs to the node partial list. 3152 */ 3153 static void put_partials(struct kmem_cache *s) 3154 { 3155 struct slab *partial_slab; 3156 unsigned long flags; 3157 3158 local_lock_irqsave(&s->cpu_slab->lock, flags); 3159 partial_slab = this_cpu_read(s->cpu_slab->partial); 3160 this_cpu_write(s->cpu_slab->partial, NULL); 3161 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3162 3163 if (partial_slab) 3164 __put_partials(s, partial_slab); 3165 } 3166 3167 static void put_partials_cpu(struct kmem_cache *s, 3168 struct kmem_cache_cpu *c) 3169 { 3170 struct slab *partial_slab; 3171 3172 partial_slab = slub_percpu_partial(c); 3173 c->partial = NULL; 3174 3175 if (partial_slab) 3176 __put_partials(s, partial_slab); 3177 } 3178 3179 /* 3180 * Put a slab into a partial slab slot if available. 3181 * 3182 * If we did not find a slot then simply move all the partials to the 3183 * per node partial list. 3184 */ 3185 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 3186 { 3187 struct slab *oldslab; 3188 struct slab *slab_to_put = NULL; 3189 unsigned long flags; 3190 int slabs = 0; 3191 3192 local_lock_irqsave(&s->cpu_slab->lock, flags); 3193 3194 oldslab = this_cpu_read(s->cpu_slab->partial); 3195 3196 if (oldslab) { 3197 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 3198 /* 3199 * Partial array is full. Move the existing set to the 3200 * per node partial list. Postpone the actual unfreezing 3201 * outside of the critical section. 3202 */ 3203 slab_to_put = oldslab; 3204 oldslab = NULL; 3205 } else { 3206 slabs = oldslab->slabs; 3207 } 3208 } 3209 3210 slabs++; 3211 3212 slab->slabs = slabs; 3213 slab->next = oldslab; 3214 3215 this_cpu_write(s->cpu_slab->partial, slab); 3216 3217 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3218 3219 if (slab_to_put) { 3220 __put_partials(s, slab_to_put); 3221 stat(s, CPU_PARTIAL_DRAIN); 3222 } 3223 } 3224 3225 #else /* CONFIG_SLUB_CPU_PARTIAL */ 3226 3227 static inline void put_partials(struct kmem_cache *s) { } 3228 static inline void put_partials_cpu(struct kmem_cache *s, 3229 struct kmem_cache_cpu *c) { } 3230 3231 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 3232 3233 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 3234 { 3235 unsigned long flags; 3236 struct slab *slab; 3237 void *freelist; 3238 3239 local_lock_irqsave(&s->cpu_slab->lock, flags); 3240 3241 slab = c->slab; 3242 freelist = c->freelist; 3243 3244 c->slab = NULL; 3245 c->freelist = NULL; 3246 c->tid = next_tid(c->tid); 3247 3248 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3249 3250 if (slab) { 3251 deactivate_slab(s, slab, freelist); 3252 stat(s, CPUSLAB_FLUSH); 3253 } 3254 } 3255 3256 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 3257 { 3258 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3259 void *freelist = c->freelist; 3260 struct slab *slab = c->slab; 3261 3262 c->slab = NULL; 3263 c->freelist = NULL; 3264 c->tid = next_tid(c->tid); 3265 3266 if (slab) { 3267 deactivate_slab(s, slab, freelist); 3268 stat(s, CPUSLAB_FLUSH); 3269 } 3270 3271 put_partials_cpu(s, c); 3272 } 3273 3274 struct slub_flush_work { 3275 struct work_struct work; 3276 struct kmem_cache *s; 3277 bool skip; 3278 }; 3279 3280 /* 3281 * Flush cpu slab. 3282 * 3283 * Called from CPU work handler with migration disabled. 3284 */ 3285 static void flush_cpu_slab(struct work_struct *w) 3286 { 3287 struct kmem_cache *s; 3288 struct kmem_cache_cpu *c; 3289 struct slub_flush_work *sfw; 3290 3291 sfw = container_of(w, struct slub_flush_work, work); 3292 3293 s = sfw->s; 3294 c = this_cpu_ptr(s->cpu_slab); 3295 3296 if (c->slab) 3297 flush_slab(s, c); 3298 3299 put_partials(s); 3300 } 3301 3302 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 3303 { 3304 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3305 3306 return c->slab || slub_percpu_partial(c); 3307 } 3308 3309 static DEFINE_MUTEX(flush_lock); 3310 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush); 3311 3312 static void flush_all_cpus_locked(struct kmem_cache *s) 3313 { 3314 struct slub_flush_work *sfw; 3315 unsigned int cpu; 3316 3317 lockdep_assert_cpus_held(); 3318 mutex_lock(&flush_lock); 3319 3320 for_each_online_cpu(cpu) { 3321 sfw = &per_cpu(slub_flush, cpu); 3322 if (!has_cpu_slab(cpu, s)) { 3323 sfw->skip = true; 3324 continue; 3325 } 3326 INIT_WORK(&sfw->work, flush_cpu_slab); 3327 sfw->skip = false; 3328 sfw->s = s; 3329 queue_work_on(cpu, flushwq, &sfw->work); 3330 } 3331 3332 for_each_online_cpu(cpu) { 3333 sfw = &per_cpu(slub_flush, cpu); 3334 if (sfw->skip) 3335 continue; 3336 flush_work(&sfw->work); 3337 } 3338 3339 mutex_unlock(&flush_lock); 3340 } 3341 3342 static void flush_all(struct kmem_cache *s) 3343 { 3344 cpus_read_lock(); 3345 flush_all_cpus_locked(s); 3346 cpus_read_unlock(); 3347 } 3348 3349 /* 3350 * Use the cpu notifier to insure that the cpu slabs are flushed when 3351 * necessary. 3352 */ 3353 static int slub_cpu_dead(unsigned int cpu) 3354 { 3355 struct kmem_cache *s; 3356 3357 mutex_lock(&slab_mutex); 3358 list_for_each_entry(s, &slab_caches, list) 3359 __flush_cpu_slab(s, cpu); 3360 mutex_unlock(&slab_mutex); 3361 return 0; 3362 } 3363 3364 #else /* CONFIG_SLUB_TINY */ 3365 static inline void flush_all_cpus_locked(struct kmem_cache *s) { } 3366 static inline void flush_all(struct kmem_cache *s) { } 3367 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } 3368 static inline int slub_cpu_dead(unsigned int cpu) { return 0; } 3369 #endif /* CONFIG_SLUB_TINY */ 3370 3371 /* 3372 * Check if the objects in a per cpu structure fit numa 3373 * locality expectations. 3374 */ 3375 static inline int node_match(struct slab *slab, int node) 3376 { 3377 #ifdef CONFIG_NUMA 3378 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 3379 return 0; 3380 #endif 3381 return 1; 3382 } 3383 3384 #ifdef CONFIG_SLUB_DEBUG 3385 static int count_free(struct slab *slab) 3386 { 3387 return slab->objects - slab->inuse; 3388 } 3389 3390 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 3391 { 3392 return atomic_long_read(&n->total_objects); 3393 } 3394 3395 /* Supports checking bulk free of a constructed freelist */ 3396 static inline bool free_debug_processing(struct kmem_cache *s, 3397 struct slab *slab, void *head, void *tail, int *bulk_cnt, 3398 unsigned long addr, depot_stack_handle_t handle) 3399 { 3400 bool checks_ok = false; 3401 void *object = head; 3402 int cnt = 0; 3403 3404 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3405 if (!check_slab(s, slab)) 3406 goto out; 3407 } 3408 3409 if (slab->inuse < *bulk_cnt) { 3410 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", 3411 slab->inuse, *bulk_cnt); 3412 goto out; 3413 } 3414 3415 next_object: 3416 3417 if (++cnt > *bulk_cnt) 3418 goto out_cnt; 3419 3420 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3421 if (!free_consistency_checks(s, slab, object, addr)) 3422 goto out; 3423 } 3424 3425 if (s->flags & SLAB_STORE_USER) 3426 set_track_update(s, object, TRACK_FREE, addr, handle); 3427 trace(s, slab, object, 0); 3428 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 3429 init_object(s, object, SLUB_RED_INACTIVE); 3430 3431 /* Reached end of constructed freelist yet? */ 3432 if (object != tail) { 3433 object = get_freepointer(s, object); 3434 goto next_object; 3435 } 3436 checks_ok = true; 3437 3438 out_cnt: 3439 if (cnt != *bulk_cnt) { 3440 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", 3441 *bulk_cnt, cnt); 3442 *bulk_cnt = cnt; 3443 } 3444 3445 out: 3446 3447 if (!checks_ok) 3448 slab_fix(s, "Object at 0x%p not freed", object); 3449 3450 return checks_ok; 3451 } 3452 #endif /* CONFIG_SLUB_DEBUG */ 3453 3454 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) 3455 static unsigned long count_partial(struct kmem_cache_node *n, 3456 int (*get_count)(struct slab *)) 3457 { 3458 unsigned long flags; 3459 unsigned long x = 0; 3460 struct slab *slab; 3461 3462 spin_lock_irqsave(&n->list_lock, flags); 3463 list_for_each_entry(slab, &n->partial, slab_list) 3464 x += get_count(slab); 3465 spin_unlock_irqrestore(&n->list_lock, flags); 3466 return x; 3467 } 3468 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ 3469 3470 #ifdef CONFIG_SLUB_DEBUG 3471 #define MAX_PARTIAL_TO_SCAN 10000 3472 3473 static unsigned long count_partial_free_approx(struct kmem_cache_node *n) 3474 { 3475 unsigned long flags; 3476 unsigned long x = 0; 3477 struct slab *slab; 3478 3479 spin_lock_irqsave(&n->list_lock, flags); 3480 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) { 3481 list_for_each_entry(slab, &n->partial, slab_list) 3482 x += slab->objects - slab->inuse; 3483 } else { 3484 /* 3485 * For a long list, approximate the total count of objects in 3486 * it to meet the limit on the number of slabs to scan. 3487 * Scan from both the list's head and tail for better accuracy. 3488 */ 3489 unsigned long scanned = 0; 3490 3491 list_for_each_entry(slab, &n->partial, slab_list) { 3492 x += slab->objects - slab->inuse; 3493 if (++scanned == MAX_PARTIAL_TO_SCAN / 2) 3494 break; 3495 } 3496 list_for_each_entry_reverse(slab, &n->partial, slab_list) { 3497 x += slab->objects - slab->inuse; 3498 if (++scanned == MAX_PARTIAL_TO_SCAN) 3499 break; 3500 } 3501 x = mult_frac(x, n->nr_partial, scanned); 3502 x = min(x, node_nr_objs(n)); 3503 } 3504 spin_unlock_irqrestore(&n->list_lock, flags); 3505 return x; 3506 } 3507 3508 static noinline void 3509 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 3510 { 3511 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 3512 DEFAULT_RATELIMIT_BURST); 3513 int cpu = raw_smp_processor_id(); 3514 int node; 3515 struct kmem_cache_node *n; 3516 3517 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 3518 return; 3519 3520 pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n", 3521 cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags); 3522 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 3523 s->name, s->object_size, s->size, oo_order(s->oo), 3524 oo_order(s->min)); 3525 3526 if (oo_order(s->min) > get_order(s->object_size)) 3527 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n", 3528 s->name); 3529 3530 for_each_kmem_cache_node(s, node, n) { 3531 unsigned long nr_slabs; 3532 unsigned long nr_objs; 3533 unsigned long nr_free; 3534 3535 nr_free = count_partial_free_approx(n); 3536 nr_slabs = node_nr_slabs(n); 3537 nr_objs = node_nr_objs(n); 3538 3539 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 3540 node, nr_slabs, nr_objs, nr_free); 3541 } 3542 } 3543 #else /* CONFIG_SLUB_DEBUG */ 3544 static inline void 3545 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } 3546 #endif 3547 3548 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) 3549 { 3550 if (unlikely(slab_test_pfmemalloc(slab))) 3551 return gfp_pfmemalloc_allowed(gfpflags); 3552 3553 return true; 3554 } 3555 3556 #ifndef CONFIG_SLUB_TINY 3557 static inline bool 3558 __update_cpu_freelist_fast(struct kmem_cache *s, 3559 void *freelist_old, void *freelist_new, 3560 unsigned long tid) 3561 { 3562 freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; 3563 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; 3564 3565 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, 3566 &old.full, new.full); 3567 } 3568 3569 /* 3570 * Check the slab->freelist and either transfer the freelist to the 3571 * per cpu freelist or deactivate the slab. 3572 * 3573 * The slab is still frozen if the return value is not NULL. 3574 * 3575 * If this function returns NULL then the slab has been unfrozen. 3576 */ 3577 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 3578 { 3579 struct slab new; 3580 unsigned long counters; 3581 void *freelist; 3582 3583 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3584 3585 do { 3586 freelist = slab->freelist; 3587 counters = slab->counters; 3588 3589 new.counters = counters; 3590 3591 new.inuse = slab->objects; 3592 new.frozen = freelist != NULL; 3593 3594 } while (!__slab_update_freelist(s, slab, 3595 freelist, counters, 3596 NULL, new.counters, 3597 "get_freelist")); 3598 3599 return freelist; 3600 } 3601 3602 /* 3603 * Freeze the partial slab and return the pointer to the freelist. 3604 */ 3605 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) 3606 { 3607 struct slab new; 3608 unsigned long counters; 3609 void *freelist; 3610 3611 do { 3612 freelist = slab->freelist; 3613 counters = slab->counters; 3614 3615 new.counters = counters; 3616 VM_BUG_ON(new.frozen); 3617 3618 new.inuse = slab->objects; 3619 new.frozen = 1; 3620 3621 } while (!slab_update_freelist(s, slab, 3622 freelist, counters, 3623 NULL, new.counters, 3624 "freeze_slab")); 3625 3626 return freelist; 3627 } 3628 3629 /* 3630 * Slow path. The lockless freelist is empty or we need to perform 3631 * debugging duties. 3632 * 3633 * Processing is still very fast if new objects have been freed to the 3634 * regular freelist. In that case we simply take over the regular freelist 3635 * as the lockless freelist and zap the regular freelist. 3636 * 3637 * If that is not working then we fall back to the partial lists. We take the 3638 * first element of the freelist as the object to allocate now and move the 3639 * rest of the freelist to the lockless freelist. 3640 * 3641 * And if we were unable to get a new slab from the partial slab lists then 3642 * we need to allocate a new slab. This is the slowest path since it involves 3643 * a call to the page allocator and the setup of a new slab. 3644 * 3645 * Version of __slab_alloc to use when we know that preemption is 3646 * already disabled (which is the case for bulk allocation). 3647 */ 3648 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3649 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3650 { 3651 void *freelist; 3652 struct slab *slab; 3653 unsigned long flags; 3654 struct partial_context pc; 3655 bool try_thisnode = true; 3656 3657 stat(s, ALLOC_SLOWPATH); 3658 3659 reread_slab: 3660 3661 slab = READ_ONCE(c->slab); 3662 if (!slab) { 3663 /* 3664 * if the node is not online or has no normal memory, just 3665 * ignore the node constraint 3666 */ 3667 if (unlikely(node != NUMA_NO_NODE && 3668 !node_isset(node, slab_nodes))) 3669 node = NUMA_NO_NODE; 3670 goto new_slab; 3671 } 3672 3673 if (unlikely(!node_match(slab, node))) { 3674 /* 3675 * same as above but node_match() being false already 3676 * implies node != NUMA_NO_NODE 3677 */ 3678 if (!node_isset(node, slab_nodes)) { 3679 node = NUMA_NO_NODE; 3680 } else { 3681 stat(s, ALLOC_NODE_MISMATCH); 3682 goto deactivate_slab; 3683 } 3684 } 3685 3686 /* 3687 * By rights, we should be searching for a slab page that was 3688 * PFMEMALLOC but right now, we are losing the pfmemalloc 3689 * information when the page leaves the per-cpu allocator 3690 */ 3691 if (unlikely(!pfmemalloc_match(slab, gfpflags))) 3692 goto deactivate_slab; 3693 3694 /* must check again c->slab in case we got preempted and it changed */ 3695 local_lock_irqsave(&s->cpu_slab->lock, flags); 3696 if (unlikely(slab != c->slab)) { 3697 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3698 goto reread_slab; 3699 } 3700 freelist = c->freelist; 3701 if (freelist) 3702 goto load_freelist; 3703 3704 freelist = get_freelist(s, slab); 3705 3706 if (!freelist) { 3707 c->slab = NULL; 3708 c->tid = next_tid(c->tid); 3709 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3710 stat(s, DEACTIVATE_BYPASS); 3711 goto new_slab; 3712 } 3713 3714 stat(s, ALLOC_REFILL); 3715 3716 load_freelist: 3717 3718 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3719 3720 /* 3721 * freelist is pointing to the list of objects to be used. 3722 * slab is pointing to the slab from which the objects are obtained. 3723 * That slab must be frozen for per cpu allocations to work. 3724 */ 3725 VM_BUG_ON(!c->slab->frozen); 3726 c->freelist = get_freepointer(s, freelist); 3727 c->tid = next_tid(c->tid); 3728 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3729 return freelist; 3730 3731 deactivate_slab: 3732 3733 local_lock_irqsave(&s->cpu_slab->lock, flags); 3734 if (slab != c->slab) { 3735 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3736 goto reread_slab; 3737 } 3738 freelist = c->freelist; 3739 c->slab = NULL; 3740 c->freelist = NULL; 3741 c->tid = next_tid(c->tid); 3742 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3743 deactivate_slab(s, slab, freelist); 3744 3745 new_slab: 3746 3747 #ifdef CONFIG_SLUB_CPU_PARTIAL 3748 while (slub_percpu_partial(c)) { 3749 local_lock_irqsave(&s->cpu_slab->lock, flags); 3750 if (unlikely(c->slab)) { 3751 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3752 goto reread_slab; 3753 } 3754 if (unlikely(!slub_percpu_partial(c))) { 3755 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3756 /* we were preempted and partial list got empty */ 3757 goto new_objects; 3758 } 3759 3760 slab = slub_percpu_partial(c); 3761 slub_set_percpu_partial(c, slab); 3762 3763 if (likely(node_match(slab, node) && 3764 pfmemalloc_match(slab, gfpflags))) { 3765 c->slab = slab; 3766 freelist = get_freelist(s, slab); 3767 VM_BUG_ON(!freelist); 3768 stat(s, CPU_PARTIAL_ALLOC); 3769 goto load_freelist; 3770 } 3771 3772 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3773 3774 slab->next = NULL; 3775 __put_partials(s, slab); 3776 } 3777 #endif 3778 3779 new_objects: 3780 3781 pc.flags = gfpflags; 3782 /* 3783 * When a preferred node is indicated but no __GFP_THISNODE 3784 * 3785 * 1) try to get a partial slab from target node only by having 3786 * __GFP_THISNODE in pc.flags for get_partial() 3787 * 2) if 1) failed, try to allocate a new slab from target node with 3788 * GPF_NOWAIT | __GFP_THISNODE opportunistically 3789 * 3) if 2) failed, retry with original gfpflags which will allow 3790 * get_partial() try partial lists of other nodes before potentially 3791 * allocating new page from other nodes 3792 */ 3793 if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 3794 && try_thisnode)) 3795 pc.flags = GFP_NOWAIT | __GFP_THISNODE; 3796 3797 pc.orig_size = orig_size; 3798 slab = get_partial(s, node, &pc); 3799 if (slab) { 3800 if (kmem_cache_debug(s)) { 3801 freelist = pc.object; 3802 /* 3803 * For debug caches here we had to go through 3804 * alloc_single_from_partial() so just store the 3805 * tracking info and return the object. 3806 */ 3807 if (s->flags & SLAB_STORE_USER) 3808 set_track(s, freelist, TRACK_ALLOC, addr); 3809 3810 return freelist; 3811 } 3812 3813 freelist = freeze_slab(s, slab); 3814 goto retry_load_slab; 3815 } 3816 3817 slub_put_cpu_ptr(s->cpu_slab); 3818 slab = new_slab(s, pc.flags, node); 3819 c = slub_get_cpu_ptr(s->cpu_slab); 3820 3821 if (unlikely(!slab)) { 3822 if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 3823 && try_thisnode) { 3824 try_thisnode = false; 3825 goto new_objects; 3826 } 3827 slab_out_of_memory(s, gfpflags, node); 3828 return NULL; 3829 } 3830 3831 stat(s, ALLOC_SLAB); 3832 3833 if (kmem_cache_debug(s)) { 3834 freelist = alloc_single_from_new_slab(s, slab, orig_size); 3835 3836 if (unlikely(!freelist)) 3837 goto new_objects; 3838 3839 if (s->flags & SLAB_STORE_USER) 3840 set_track(s, freelist, TRACK_ALLOC, addr); 3841 3842 return freelist; 3843 } 3844 3845 /* 3846 * No other reference to the slab yet so we can 3847 * muck around with it freely without cmpxchg 3848 */ 3849 freelist = slab->freelist; 3850 slab->freelist = NULL; 3851 slab->inuse = slab->objects; 3852 slab->frozen = 1; 3853 3854 inc_slabs_node(s, slab_nid(slab), slab->objects); 3855 3856 if (unlikely(!pfmemalloc_match(slab, gfpflags))) { 3857 /* 3858 * For !pfmemalloc_match() case we don't load freelist so that 3859 * we don't make further mismatched allocations easier. 3860 */ 3861 deactivate_slab(s, slab, get_freepointer(s, freelist)); 3862 return freelist; 3863 } 3864 3865 retry_load_slab: 3866 3867 local_lock_irqsave(&s->cpu_slab->lock, flags); 3868 if (unlikely(c->slab)) { 3869 void *flush_freelist = c->freelist; 3870 struct slab *flush_slab = c->slab; 3871 3872 c->slab = NULL; 3873 c->freelist = NULL; 3874 c->tid = next_tid(c->tid); 3875 3876 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3877 3878 deactivate_slab(s, flush_slab, flush_freelist); 3879 3880 stat(s, CPUSLAB_FLUSH); 3881 3882 goto retry_load_slab; 3883 } 3884 c->slab = slab; 3885 3886 goto load_freelist; 3887 } 3888 3889 /* 3890 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 3891 * disabled. Compensates for possible cpu changes by refetching the per cpu area 3892 * pointer. 3893 */ 3894 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3895 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3896 { 3897 void *p; 3898 3899 #ifdef CONFIG_PREEMPT_COUNT 3900 /* 3901 * We may have been preempted and rescheduled on a different 3902 * cpu before disabling preemption. Need to reload cpu area 3903 * pointer. 3904 */ 3905 c = slub_get_cpu_ptr(s->cpu_slab); 3906 #endif 3907 3908 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); 3909 #ifdef CONFIG_PREEMPT_COUNT 3910 slub_put_cpu_ptr(s->cpu_slab); 3911 #endif 3912 return p; 3913 } 3914 3915 static __always_inline void *__slab_alloc_node(struct kmem_cache *s, 3916 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3917 { 3918 struct kmem_cache_cpu *c; 3919 struct slab *slab; 3920 unsigned long tid; 3921 void *object; 3922 3923 redo: 3924 /* 3925 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 3926 * enabled. We may switch back and forth between cpus while 3927 * reading from one cpu area. That does not matter as long 3928 * as we end up on the original cpu again when doing the cmpxchg. 3929 * 3930 * We must guarantee that tid and kmem_cache_cpu are retrieved on the 3931 * same cpu. We read first the kmem_cache_cpu pointer and use it to read 3932 * the tid. If we are preempted and switched to another cpu between the 3933 * two reads, it's OK as the two are still associated with the same cpu 3934 * and cmpxchg later will validate the cpu. 3935 */ 3936 c = raw_cpu_ptr(s->cpu_slab); 3937 tid = READ_ONCE(c->tid); 3938 3939 /* 3940 * Irqless object alloc/free algorithm used here depends on sequence 3941 * of fetching cpu_slab's data. tid should be fetched before anything 3942 * on c to guarantee that object and slab associated with previous tid 3943 * won't be used with current tid. If we fetch tid first, object and 3944 * slab could be one associated with next tid and our alloc/free 3945 * request will be failed. In this case, we will retry. So, no problem. 3946 */ 3947 barrier(); 3948 3949 /* 3950 * The transaction ids are globally unique per cpu and per operation on 3951 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double 3952 * occurs on the right processor and that there was no operation on the 3953 * linked list in between. 3954 */ 3955 3956 object = c->freelist; 3957 slab = c->slab; 3958 3959 if (!USE_LOCKLESS_FAST_PATH() || 3960 unlikely(!object || !slab || !node_match(slab, node))) { 3961 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); 3962 } else { 3963 void *next_object = get_freepointer_safe(s, object); 3964 3965 /* 3966 * The cmpxchg will only match if there was no additional 3967 * operation and if we are on the right processor. 3968 * 3969 * The cmpxchg does the following atomically (without lock 3970 * semantics!) 3971 * 1. Relocate first pointer to the current per cpu area. 3972 * 2. Verify that tid and freelist have not been changed 3973 * 3. If they were not changed replace tid and freelist 3974 * 3975 * Since this is without lock semantics the protection is only 3976 * against code executing on this cpu *not* from access by 3977 * other cpus. 3978 */ 3979 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { 3980 note_cmpxchg_failure("slab_alloc", s, tid); 3981 goto redo; 3982 } 3983 prefetch_freepointer(s, next_object); 3984 stat(s, ALLOC_FASTPATH); 3985 } 3986 3987 return object; 3988 } 3989 #else /* CONFIG_SLUB_TINY */ 3990 static void *__slab_alloc_node(struct kmem_cache *s, 3991 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3992 { 3993 struct partial_context pc; 3994 struct slab *slab; 3995 void *object; 3996 3997 pc.flags = gfpflags; 3998 pc.orig_size = orig_size; 3999 slab = get_partial(s, node, &pc); 4000 4001 if (slab) 4002 return pc.object; 4003 4004 slab = new_slab(s, gfpflags, node); 4005 if (unlikely(!slab)) { 4006 slab_out_of_memory(s, gfpflags, node); 4007 return NULL; 4008 } 4009 4010 object = alloc_single_from_new_slab(s, slab, orig_size); 4011 4012 return object; 4013 } 4014 #endif /* CONFIG_SLUB_TINY */ 4015 4016 /* 4017 * If the object has been wiped upon free, make sure it's fully initialized by 4018 * zeroing out freelist pointer. 4019 * 4020 * Note that we also wipe custom freelist pointers. 4021 */ 4022 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 4023 void *obj) 4024 { 4025 if (unlikely(slab_want_init_on_free(s)) && obj && 4026 !freeptr_outside_object(s)) 4027 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 4028 0, sizeof(void *)); 4029 } 4030 4031 static __fastpath_inline 4032 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 4033 { 4034 flags &= gfp_allowed_mask; 4035 4036 might_alloc(flags); 4037 4038 if (unlikely(should_failslab(s, flags))) 4039 return NULL; 4040 4041 return s; 4042 } 4043 4044 static __fastpath_inline 4045 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 4046 gfp_t flags, size_t size, void **p, bool init, 4047 unsigned int orig_size) 4048 { 4049 unsigned int zero_size = s->object_size; 4050 bool kasan_init = init; 4051 size_t i; 4052 gfp_t init_flags = flags & gfp_allowed_mask; 4053 4054 /* 4055 * For kmalloc object, the allocated memory size(object_size) is likely 4056 * larger than the requested size(orig_size). If redzone check is 4057 * enabled for the extra space, don't zero it, as it will be redzoned 4058 * soon. The redzone operation for this extra space could be seen as a 4059 * replacement of current poisoning under certain debug option, and 4060 * won't break other sanity checks. 4061 */ 4062 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) && 4063 (s->flags & SLAB_KMALLOC)) 4064 zero_size = orig_size; 4065 4066 /* 4067 * When slab_debug is enabled, avoid memory initialization integrated 4068 * into KASAN and instead zero out the memory via the memset below with 4069 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and 4070 * cause false-positive reports. This does not lead to a performance 4071 * penalty on production builds, as slab_debug is not intended to be 4072 * enabled there. 4073 */ 4074 if (__slub_debug_enabled()) 4075 kasan_init = false; 4076 4077 /* 4078 * As memory initialization might be integrated into KASAN, 4079 * kasan_slab_alloc and initialization memset must be 4080 * kept together to avoid discrepancies in behavior. 4081 * 4082 * As p[i] might get tagged, memset and kmemleak hook come after KASAN. 4083 */ 4084 for (i = 0; i < size; i++) { 4085 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init); 4086 if (p[i] && init && (!kasan_init || 4087 !kasan_has_integrated_init())) 4088 memset(p[i], 0, zero_size); 4089 kmemleak_alloc_recursive(p[i], s->object_size, 1, 4090 s->flags, init_flags); 4091 kmsan_slab_alloc(s, p[i], init_flags); 4092 alloc_tagging_slab_alloc_hook(s, p[i], flags); 4093 } 4094 4095 return memcg_slab_post_alloc_hook(s, lru, flags, size, p); 4096 } 4097 4098 /* 4099 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 4100 * have the fastpath folded into their functions. So no function call 4101 * overhead for requests that can be satisfied on the fastpath. 4102 * 4103 * The fastpath works by first checking if the lockless freelist can be used. 4104 * If not then __slab_alloc is called for slow processing. 4105 * 4106 * Otherwise we can simply pick the next object from the lockless free list. 4107 */ 4108 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, 4109 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 4110 { 4111 void *object; 4112 bool init = false; 4113 4114 s = slab_pre_alloc_hook(s, gfpflags); 4115 if (unlikely(!s)) 4116 return NULL; 4117 4118 object = kfence_alloc(s, orig_size, gfpflags); 4119 if (unlikely(object)) 4120 goto out; 4121 4122 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); 4123 4124 maybe_wipe_obj_freeptr(s, object); 4125 init = slab_want_init_on_alloc(gfpflags, s); 4126 4127 out: 4128 /* 4129 * When init equals 'true', like for kzalloc() family, only 4130 * @orig_size bytes might be zeroed instead of s->object_size 4131 * In case this fails due to memcg_slab_post_alloc_hook(), 4132 * object is set to NULL 4133 */ 4134 slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size); 4135 4136 return object; 4137 } 4138 4139 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags) 4140 { 4141 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, 4142 s->object_size); 4143 4144 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4145 4146 return ret; 4147 } 4148 EXPORT_SYMBOL(kmem_cache_alloc_noprof); 4149 4150 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, 4151 gfp_t gfpflags) 4152 { 4153 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, 4154 s->object_size); 4155 4156 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4157 4158 return ret; 4159 } 4160 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof); 4161 4162 bool kmem_cache_charge(void *objp, gfp_t gfpflags) 4163 { 4164 if (!memcg_kmem_online()) 4165 return true; 4166 4167 return memcg_slab_post_charge(objp, gfpflags); 4168 } 4169 EXPORT_SYMBOL(kmem_cache_charge); 4170 4171 /** 4172 * kmem_cache_alloc_node - Allocate an object on the specified node 4173 * @s: The cache to allocate from. 4174 * @gfpflags: See kmalloc(). 4175 * @node: node number of the target node. 4176 * 4177 * Identical to kmem_cache_alloc but it will allocate memory on the given 4178 * node, which can improve the performance for cpu bound structures. 4179 * 4180 * Fallback to other node is possible if __GFP_THISNODE is not set. 4181 * 4182 * Return: pointer to the new object or %NULL in case of error 4183 */ 4184 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node) 4185 { 4186 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); 4187 4188 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); 4189 4190 return ret; 4191 } 4192 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof); 4193 4194 /* 4195 * To avoid unnecessary overhead, we pass through large allocation requests 4196 * directly to the page allocator. We use __GFP_COMP, because we will need to 4197 * know the allocation order to free the pages properly in kfree. 4198 */ 4199 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node) 4200 { 4201 struct folio *folio; 4202 void *ptr = NULL; 4203 unsigned int order = get_order(size); 4204 4205 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 4206 flags = kmalloc_fix_flags(flags); 4207 4208 flags |= __GFP_COMP; 4209 folio = (struct folio *)alloc_pages_node_noprof(node, flags, order); 4210 if (folio) { 4211 ptr = folio_address(folio); 4212 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4213 PAGE_SIZE << order); 4214 } 4215 4216 ptr = kasan_kmalloc_large(ptr, size, flags); 4217 /* As ptr might get tagged, call kmemleak hook after KASAN. */ 4218 kmemleak_alloc(ptr, size, 1, flags); 4219 kmsan_kmalloc_large(ptr, size, flags); 4220 4221 return ptr; 4222 } 4223 4224 void *__kmalloc_large_noprof(size_t size, gfp_t flags) 4225 { 4226 void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE); 4227 4228 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4229 flags, NUMA_NO_NODE); 4230 return ret; 4231 } 4232 EXPORT_SYMBOL(__kmalloc_large_noprof); 4233 4234 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) 4235 { 4236 void *ret = ___kmalloc_large_node(size, flags, node); 4237 4238 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4239 flags, node); 4240 return ret; 4241 } 4242 EXPORT_SYMBOL(__kmalloc_large_node_noprof); 4243 4244 static __always_inline 4245 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node, 4246 unsigned long caller) 4247 { 4248 struct kmem_cache *s; 4249 void *ret; 4250 4251 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4252 ret = __kmalloc_large_node_noprof(size, flags, node); 4253 trace_kmalloc(caller, ret, size, 4254 PAGE_SIZE << get_order(size), flags, node); 4255 return ret; 4256 } 4257 4258 if (unlikely(!size)) 4259 return ZERO_SIZE_PTR; 4260 4261 s = kmalloc_slab(size, b, flags, caller); 4262 4263 ret = slab_alloc_node(s, NULL, flags, node, caller, size); 4264 ret = kasan_kmalloc(s, ret, size, flags); 4265 trace_kmalloc(caller, ret, size, s->size, flags, node); 4266 return ret; 4267 } 4268 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) 4269 { 4270 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_); 4271 } 4272 EXPORT_SYMBOL(__kmalloc_node_noprof); 4273 4274 void *__kmalloc_noprof(size_t size, gfp_t flags) 4275 { 4276 return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_); 4277 } 4278 EXPORT_SYMBOL(__kmalloc_noprof); 4279 4280 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, 4281 int node, unsigned long caller) 4282 { 4283 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller); 4284 4285 } 4286 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof); 4287 4288 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size) 4289 { 4290 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, 4291 _RET_IP_, size); 4292 4293 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); 4294 4295 ret = kasan_kmalloc(s, ret, size, gfpflags); 4296 return ret; 4297 } 4298 EXPORT_SYMBOL(__kmalloc_cache_noprof); 4299 4300 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags, 4301 int node, size_t size) 4302 { 4303 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); 4304 4305 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); 4306 4307 ret = kasan_kmalloc(s, ret, size, gfpflags); 4308 return ret; 4309 } 4310 EXPORT_SYMBOL(__kmalloc_cache_node_noprof); 4311 4312 static noinline void free_to_partial_list( 4313 struct kmem_cache *s, struct slab *slab, 4314 void *head, void *tail, int bulk_cnt, 4315 unsigned long addr) 4316 { 4317 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 4318 struct slab *slab_free = NULL; 4319 int cnt = bulk_cnt; 4320 unsigned long flags; 4321 depot_stack_handle_t handle = 0; 4322 4323 if (s->flags & SLAB_STORE_USER) 4324 handle = set_track_prepare(); 4325 4326 spin_lock_irqsave(&n->list_lock, flags); 4327 4328 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { 4329 void *prior = slab->freelist; 4330 4331 /* Perform the actual freeing while we still hold the locks */ 4332 slab->inuse -= cnt; 4333 set_freepointer(s, tail, prior); 4334 slab->freelist = head; 4335 4336 /* 4337 * If the slab is empty, and node's partial list is full, 4338 * it should be discarded anyway no matter it's on full or 4339 * partial list. 4340 */ 4341 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) 4342 slab_free = slab; 4343 4344 if (!prior) { 4345 /* was on full list */ 4346 remove_full(s, n, slab); 4347 if (!slab_free) { 4348 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4349 stat(s, FREE_ADD_PARTIAL); 4350 } 4351 } else if (slab_free) { 4352 remove_partial(n, slab); 4353 stat(s, FREE_REMOVE_PARTIAL); 4354 } 4355 } 4356 4357 if (slab_free) { 4358 /* 4359 * Update the counters while still holding n->list_lock to 4360 * prevent spurious validation warnings 4361 */ 4362 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); 4363 } 4364 4365 spin_unlock_irqrestore(&n->list_lock, flags); 4366 4367 if (slab_free) { 4368 stat(s, FREE_SLAB); 4369 free_slab(s, slab_free); 4370 } 4371 } 4372 4373 /* 4374 * Slow path handling. This may still be called frequently since objects 4375 * have a longer lifetime than the cpu slabs in most processing loads. 4376 * 4377 * So we still attempt to reduce cache line usage. Just take the slab 4378 * lock and free the item. If there is no additional partial slab 4379 * handling required then we can return immediately. 4380 */ 4381 static void __slab_free(struct kmem_cache *s, struct slab *slab, 4382 void *head, void *tail, int cnt, 4383 unsigned long addr) 4384 4385 { 4386 void *prior; 4387 int was_frozen; 4388 struct slab new; 4389 unsigned long counters; 4390 struct kmem_cache_node *n = NULL; 4391 unsigned long flags; 4392 bool on_node_partial; 4393 4394 stat(s, FREE_SLOWPATH); 4395 4396 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 4397 free_to_partial_list(s, slab, head, tail, cnt, addr); 4398 return; 4399 } 4400 4401 do { 4402 if (unlikely(n)) { 4403 spin_unlock_irqrestore(&n->list_lock, flags); 4404 n = NULL; 4405 } 4406 prior = slab->freelist; 4407 counters = slab->counters; 4408 set_freepointer(s, tail, prior); 4409 new.counters = counters; 4410 was_frozen = new.frozen; 4411 new.inuse -= cnt; 4412 if ((!new.inuse || !prior) && !was_frozen) { 4413 /* Needs to be taken off a list */ 4414 if (!kmem_cache_has_cpu_partial(s) || prior) { 4415 4416 n = get_node(s, slab_nid(slab)); 4417 /* 4418 * Speculatively acquire the list_lock. 4419 * If the cmpxchg does not succeed then we may 4420 * drop the list_lock without any processing. 4421 * 4422 * Otherwise the list_lock will synchronize with 4423 * other processors updating the list of slabs. 4424 */ 4425 spin_lock_irqsave(&n->list_lock, flags); 4426 4427 on_node_partial = slab_test_node_partial(slab); 4428 } 4429 } 4430 4431 } while (!slab_update_freelist(s, slab, 4432 prior, counters, 4433 head, new.counters, 4434 "__slab_free")); 4435 4436 if (likely(!n)) { 4437 4438 if (likely(was_frozen)) { 4439 /* 4440 * The list lock was not taken therefore no list 4441 * activity can be necessary. 4442 */ 4443 stat(s, FREE_FROZEN); 4444 } else if (kmem_cache_has_cpu_partial(s) && !prior) { 4445 /* 4446 * If we started with a full slab then put it onto the 4447 * per cpu partial list. 4448 */ 4449 put_cpu_partial(s, slab, 1); 4450 stat(s, CPU_PARTIAL_FREE); 4451 } 4452 4453 return; 4454 } 4455 4456 /* 4457 * This slab was partially empty but not on the per-node partial list, 4458 * in which case we shouldn't manipulate its list, just return. 4459 */ 4460 if (prior && !on_node_partial) { 4461 spin_unlock_irqrestore(&n->list_lock, flags); 4462 return; 4463 } 4464 4465 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 4466 goto slab_empty; 4467 4468 /* 4469 * Objects left in the slab. If it was not on the partial list before 4470 * then add it. 4471 */ 4472 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 4473 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4474 stat(s, FREE_ADD_PARTIAL); 4475 } 4476 spin_unlock_irqrestore(&n->list_lock, flags); 4477 return; 4478 4479 slab_empty: 4480 if (prior) { 4481 /* 4482 * Slab on the partial list. 4483 */ 4484 remove_partial(n, slab); 4485 stat(s, FREE_REMOVE_PARTIAL); 4486 } 4487 4488 spin_unlock_irqrestore(&n->list_lock, flags); 4489 stat(s, FREE_SLAB); 4490 discard_slab(s, slab); 4491 } 4492 4493 #ifndef CONFIG_SLUB_TINY 4494 /* 4495 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 4496 * can perform fastpath freeing without additional function calls. 4497 * 4498 * The fastpath is only possible if we are freeing to the current cpu slab 4499 * of this processor. This typically the case if we have just allocated 4500 * the item before. 4501 * 4502 * If fastpath is not possible then fall back to __slab_free where we deal 4503 * with all sorts of special processing. 4504 * 4505 * Bulk free of a freelist with several objects (all pointing to the 4506 * same slab) possible by specifying head and tail ptr, plus objects 4507 * count (cnt). Bulk free indicated by tail pointer being set. 4508 */ 4509 static __always_inline void do_slab_free(struct kmem_cache *s, 4510 struct slab *slab, void *head, void *tail, 4511 int cnt, unsigned long addr) 4512 { 4513 struct kmem_cache_cpu *c; 4514 unsigned long tid; 4515 void **freelist; 4516 4517 redo: 4518 /* 4519 * Determine the currently cpus per cpu slab. 4520 * The cpu may change afterward. However that does not matter since 4521 * data is retrieved via this pointer. If we are on the same cpu 4522 * during the cmpxchg then the free will succeed. 4523 */ 4524 c = raw_cpu_ptr(s->cpu_slab); 4525 tid = READ_ONCE(c->tid); 4526 4527 /* Same with comment on barrier() in __slab_alloc_node() */ 4528 barrier(); 4529 4530 if (unlikely(slab != c->slab)) { 4531 __slab_free(s, slab, head, tail, cnt, addr); 4532 return; 4533 } 4534 4535 if (USE_LOCKLESS_FAST_PATH()) { 4536 freelist = READ_ONCE(c->freelist); 4537 4538 set_freepointer(s, tail, freelist); 4539 4540 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { 4541 note_cmpxchg_failure("slab_free", s, tid); 4542 goto redo; 4543 } 4544 } else { 4545 /* Update the free list under the local lock */ 4546 local_lock(&s->cpu_slab->lock); 4547 c = this_cpu_ptr(s->cpu_slab); 4548 if (unlikely(slab != c->slab)) { 4549 local_unlock(&s->cpu_slab->lock); 4550 goto redo; 4551 } 4552 tid = c->tid; 4553 freelist = c->freelist; 4554 4555 set_freepointer(s, tail, freelist); 4556 c->freelist = head; 4557 c->tid = next_tid(tid); 4558 4559 local_unlock(&s->cpu_slab->lock); 4560 } 4561 stat_add(s, FREE_FASTPATH, cnt); 4562 } 4563 #else /* CONFIG_SLUB_TINY */ 4564 static void do_slab_free(struct kmem_cache *s, 4565 struct slab *slab, void *head, void *tail, 4566 int cnt, unsigned long addr) 4567 { 4568 __slab_free(s, slab, head, tail, cnt, addr); 4569 } 4570 #endif /* CONFIG_SLUB_TINY */ 4571 4572 static __fastpath_inline 4573 void slab_free(struct kmem_cache *s, struct slab *slab, void *object, 4574 unsigned long addr) 4575 { 4576 memcg_slab_free_hook(s, slab, &object, 1); 4577 alloc_tagging_slab_free_hook(s, slab, &object, 1); 4578 4579 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false))) 4580 do_slab_free(s, slab, object, object, 1, addr); 4581 } 4582 4583 #ifdef CONFIG_MEMCG 4584 /* Do not inline the rare memcg charging failed path into the allocation path */ 4585 static noinline 4586 void memcg_alloc_abort_single(struct kmem_cache *s, void *object) 4587 { 4588 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false))) 4589 do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_); 4590 } 4591 #endif 4592 4593 static __fastpath_inline 4594 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, 4595 void *tail, void **p, int cnt, unsigned long addr) 4596 { 4597 memcg_slab_free_hook(s, slab, p, cnt); 4598 alloc_tagging_slab_free_hook(s, slab, p, cnt); 4599 /* 4600 * With KASAN enabled slab_free_freelist_hook modifies the freelist 4601 * to remove objects, whose reuse must be delayed. 4602 */ 4603 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) 4604 do_slab_free(s, slab, head, tail, cnt, addr); 4605 } 4606 4607 #ifdef CONFIG_SLUB_RCU_DEBUG 4608 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head) 4609 { 4610 struct rcu_delayed_free *delayed_free = 4611 container_of(rcu_head, struct rcu_delayed_free, head); 4612 void *object = delayed_free->object; 4613 struct slab *slab = virt_to_slab(object); 4614 struct kmem_cache *s; 4615 4616 kfree(delayed_free); 4617 4618 if (WARN_ON(is_kfence_address(object))) 4619 return; 4620 4621 /* find the object and the cache again */ 4622 if (WARN_ON(!slab)) 4623 return; 4624 s = slab->slab_cache; 4625 if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU))) 4626 return; 4627 4628 /* resume freeing */ 4629 if (slab_free_hook(s, object, slab_want_init_on_free(s), true)) 4630 do_slab_free(s, slab, object, object, 1, _THIS_IP_); 4631 } 4632 #endif /* CONFIG_SLUB_RCU_DEBUG */ 4633 4634 #ifdef CONFIG_KASAN_GENERIC 4635 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 4636 { 4637 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr); 4638 } 4639 #endif 4640 4641 static inline struct kmem_cache *virt_to_cache(const void *obj) 4642 { 4643 struct slab *slab; 4644 4645 slab = virt_to_slab(obj); 4646 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) 4647 return NULL; 4648 return slab->slab_cache; 4649 } 4650 4651 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) 4652 { 4653 struct kmem_cache *cachep; 4654 4655 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && 4656 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) 4657 return s; 4658 4659 cachep = virt_to_cache(x); 4660 if (WARN(cachep && cachep != s, 4661 "%s: Wrong slab cache. %s but object is from %s\n", 4662 __func__, s->name, cachep->name)) 4663 print_tracking(cachep, x); 4664 return cachep; 4665 } 4666 4667 /** 4668 * kmem_cache_free - Deallocate an object 4669 * @s: The cache the allocation was from. 4670 * @x: The previously allocated object. 4671 * 4672 * Free an object which was previously allocated from this 4673 * cache. 4674 */ 4675 void kmem_cache_free(struct kmem_cache *s, void *x) 4676 { 4677 s = cache_from_obj(s, x); 4678 if (!s) 4679 return; 4680 trace_kmem_cache_free(_RET_IP_, x, s); 4681 slab_free(s, virt_to_slab(x), x, _RET_IP_); 4682 } 4683 EXPORT_SYMBOL(kmem_cache_free); 4684 4685 static void free_large_kmalloc(struct folio *folio, void *object) 4686 { 4687 unsigned int order = folio_order(folio); 4688 4689 if (WARN_ON_ONCE(order == 0)) 4690 pr_warn_once("object pointer: 0x%p\n", object); 4691 4692 kmemleak_free(object); 4693 kasan_kfree_large(object); 4694 kmsan_kfree_large(object); 4695 4696 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4697 -(PAGE_SIZE << order)); 4698 folio_put(folio); 4699 } 4700 4701 /** 4702 * kfree - free previously allocated memory 4703 * @object: pointer returned by kmalloc() or kmem_cache_alloc() 4704 * 4705 * If @object is NULL, no operation is performed. 4706 */ 4707 void kfree(const void *object) 4708 { 4709 struct folio *folio; 4710 struct slab *slab; 4711 struct kmem_cache *s; 4712 void *x = (void *)object; 4713 4714 trace_kfree(_RET_IP_, object); 4715 4716 if (unlikely(ZERO_OR_NULL_PTR(object))) 4717 return; 4718 4719 folio = virt_to_folio(object); 4720 if (unlikely(!folio_test_slab(folio))) { 4721 free_large_kmalloc(folio, (void *)object); 4722 return; 4723 } 4724 4725 slab = folio_slab(folio); 4726 s = slab->slab_cache; 4727 slab_free(s, slab, x, _RET_IP_); 4728 } 4729 EXPORT_SYMBOL(kfree); 4730 4731 struct detached_freelist { 4732 struct slab *slab; 4733 void *tail; 4734 void *freelist; 4735 int cnt; 4736 struct kmem_cache *s; 4737 }; 4738 4739 /* 4740 * This function progressively scans the array with free objects (with 4741 * a limited look ahead) and extract objects belonging to the same 4742 * slab. It builds a detached freelist directly within the given 4743 * slab/objects. This can happen without any need for 4744 * synchronization, because the objects are owned by running process. 4745 * The freelist is build up as a single linked list in the objects. 4746 * The idea is, that this detached freelist can then be bulk 4747 * transferred to the real freelist(s), but only requiring a single 4748 * synchronization primitive. Look ahead in the array is limited due 4749 * to performance reasons. 4750 */ 4751 static inline 4752 int build_detached_freelist(struct kmem_cache *s, size_t size, 4753 void **p, struct detached_freelist *df) 4754 { 4755 int lookahead = 3; 4756 void *object; 4757 struct folio *folio; 4758 size_t same; 4759 4760 object = p[--size]; 4761 folio = virt_to_folio(object); 4762 if (!s) { 4763 /* Handle kalloc'ed objects */ 4764 if (unlikely(!folio_test_slab(folio))) { 4765 free_large_kmalloc(folio, object); 4766 df->slab = NULL; 4767 return size; 4768 } 4769 /* Derive kmem_cache from object */ 4770 df->slab = folio_slab(folio); 4771 df->s = df->slab->slab_cache; 4772 } else { 4773 df->slab = folio_slab(folio); 4774 df->s = cache_from_obj(s, object); /* Support for memcg */ 4775 } 4776 4777 /* Start new detached freelist */ 4778 df->tail = object; 4779 df->freelist = object; 4780 df->cnt = 1; 4781 4782 if (is_kfence_address(object)) 4783 return size; 4784 4785 set_freepointer(df->s, object, NULL); 4786 4787 same = size; 4788 while (size) { 4789 object = p[--size]; 4790 /* df->slab is always set at this point */ 4791 if (df->slab == virt_to_slab(object)) { 4792 /* Opportunity build freelist */ 4793 set_freepointer(df->s, object, df->freelist); 4794 df->freelist = object; 4795 df->cnt++; 4796 same--; 4797 if (size != same) 4798 swap(p[size], p[same]); 4799 continue; 4800 } 4801 4802 /* Limit look ahead search */ 4803 if (!--lookahead) 4804 break; 4805 } 4806 4807 return same; 4808 } 4809 4810 /* 4811 * Internal bulk free of objects that were not initialised by the post alloc 4812 * hooks and thus should not be processed by the free hooks 4813 */ 4814 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4815 { 4816 if (!size) 4817 return; 4818 4819 do { 4820 struct detached_freelist df; 4821 4822 size = build_detached_freelist(s, size, p, &df); 4823 if (!df.slab) 4824 continue; 4825 4826 if (kfence_free(df.freelist)) 4827 continue; 4828 4829 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, 4830 _RET_IP_); 4831 } while (likely(size)); 4832 } 4833 4834 /* Note that interrupts must be enabled when calling this function. */ 4835 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4836 { 4837 if (!size) 4838 return; 4839 4840 do { 4841 struct detached_freelist df; 4842 4843 size = build_detached_freelist(s, size, p, &df); 4844 if (!df.slab) 4845 continue; 4846 4847 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size], 4848 df.cnt, _RET_IP_); 4849 } while (likely(size)); 4850 } 4851 EXPORT_SYMBOL(kmem_cache_free_bulk); 4852 4853 #ifndef CONFIG_SLUB_TINY 4854 static inline 4855 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 4856 void **p) 4857 { 4858 struct kmem_cache_cpu *c; 4859 unsigned long irqflags; 4860 int i; 4861 4862 /* 4863 * Drain objects in the per cpu slab, while disabling local 4864 * IRQs, which protects against PREEMPT and interrupts 4865 * handlers invoking normal fastpath. 4866 */ 4867 c = slub_get_cpu_ptr(s->cpu_slab); 4868 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4869 4870 for (i = 0; i < size; i++) { 4871 void *object = kfence_alloc(s, s->object_size, flags); 4872 4873 if (unlikely(object)) { 4874 p[i] = object; 4875 continue; 4876 } 4877 4878 object = c->freelist; 4879 if (unlikely(!object)) { 4880 /* 4881 * We may have removed an object from c->freelist using 4882 * the fastpath in the previous iteration; in that case, 4883 * c->tid has not been bumped yet. 4884 * Since ___slab_alloc() may reenable interrupts while 4885 * allocating memory, we should bump c->tid now. 4886 */ 4887 c->tid = next_tid(c->tid); 4888 4889 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4890 4891 /* 4892 * Invoking slow path likely have side-effect 4893 * of re-populating per CPU c->freelist 4894 */ 4895 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 4896 _RET_IP_, c, s->object_size); 4897 if (unlikely(!p[i])) 4898 goto error; 4899 4900 c = this_cpu_ptr(s->cpu_slab); 4901 maybe_wipe_obj_freeptr(s, p[i]); 4902 4903 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4904 4905 continue; /* goto for-loop */ 4906 } 4907 c->freelist = get_freepointer(s, object); 4908 p[i] = object; 4909 maybe_wipe_obj_freeptr(s, p[i]); 4910 stat(s, ALLOC_FASTPATH); 4911 } 4912 c->tid = next_tid(c->tid); 4913 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4914 slub_put_cpu_ptr(s->cpu_slab); 4915 4916 return i; 4917 4918 error: 4919 slub_put_cpu_ptr(s->cpu_slab); 4920 __kmem_cache_free_bulk(s, i, p); 4921 return 0; 4922 4923 } 4924 #else /* CONFIG_SLUB_TINY */ 4925 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 4926 size_t size, void **p) 4927 { 4928 int i; 4929 4930 for (i = 0; i < size; i++) { 4931 void *object = kfence_alloc(s, s->object_size, flags); 4932 4933 if (unlikely(object)) { 4934 p[i] = object; 4935 continue; 4936 } 4937 4938 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, 4939 _RET_IP_, s->object_size); 4940 if (unlikely(!p[i])) 4941 goto error; 4942 4943 maybe_wipe_obj_freeptr(s, p[i]); 4944 } 4945 4946 return i; 4947 4948 error: 4949 __kmem_cache_free_bulk(s, i, p); 4950 return 0; 4951 } 4952 #endif /* CONFIG_SLUB_TINY */ 4953 4954 /* Note that interrupts must be enabled when calling this function. */ 4955 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, 4956 void **p) 4957 { 4958 int i; 4959 4960 if (!size) 4961 return 0; 4962 4963 s = slab_pre_alloc_hook(s, flags); 4964 if (unlikely(!s)) 4965 return 0; 4966 4967 i = __kmem_cache_alloc_bulk(s, flags, size, p); 4968 if (unlikely(i == 0)) 4969 return 0; 4970 4971 /* 4972 * memcg and kmem_cache debug support and memory initialization. 4973 * Done outside of the IRQ disabled fastpath loop. 4974 */ 4975 if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p, 4976 slab_want_init_on_alloc(flags, s), s->object_size))) { 4977 return 0; 4978 } 4979 return i; 4980 } 4981 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof); 4982 4983 4984 /* 4985 * Object placement in a slab is made very easy because we always start at 4986 * offset 0. If we tune the size of the object to the alignment then we can 4987 * get the required alignment by putting one properly sized object after 4988 * another. 4989 * 4990 * Notice that the allocation order determines the sizes of the per cpu 4991 * caches. Each processor has always one slab available for allocations. 4992 * Increasing the allocation order reduces the number of times that slabs 4993 * must be moved on and off the partial lists and is therefore a factor in 4994 * locking overhead. 4995 */ 4996 4997 /* 4998 * Minimum / Maximum order of slab pages. This influences locking overhead 4999 * and slab fragmentation. A higher order reduces the number of partial slabs 5000 * and increases the number of allocations possible without having to 5001 * take the list_lock. 5002 */ 5003 static unsigned int slub_min_order; 5004 static unsigned int slub_max_order = 5005 IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER; 5006 static unsigned int slub_min_objects; 5007 5008 /* 5009 * Calculate the order of allocation given an slab object size. 5010 * 5011 * The order of allocation has significant impact on performance and other 5012 * system components. Generally order 0 allocations should be preferred since 5013 * order 0 does not cause fragmentation in the page allocator. Larger objects 5014 * be problematic to put into order 0 slabs because there may be too much 5015 * unused space left. We go to a higher order if more than 1/16th of the slab 5016 * would be wasted. 5017 * 5018 * In order to reach satisfactory performance we must ensure that a minimum 5019 * number of objects is in one slab. Otherwise we may generate too much 5020 * activity on the partial lists which requires taking the list_lock. This is 5021 * less a concern for large slabs though which are rarely used. 5022 * 5023 * slab_max_order specifies the order where we begin to stop considering the 5024 * number of objects in a slab as critical. If we reach slab_max_order then 5025 * we try to keep the page order as low as possible. So we accept more waste 5026 * of space in favor of a small page order. 5027 * 5028 * Higher order allocations also allow the placement of more objects in a 5029 * slab and thereby reduce object handling overhead. If the user has 5030 * requested a higher minimum order then we start with that one instead of 5031 * the smallest order which will fit the object. 5032 */ 5033 static inline unsigned int calc_slab_order(unsigned int size, 5034 unsigned int min_order, unsigned int max_order, 5035 unsigned int fract_leftover) 5036 { 5037 unsigned int order; 5038 5039 for (order = min_order; order <= max_order; order++) { 5040 5041 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 5042 unsigned int rem; 5043 5044 rem = slab_size % size; 5045 5046 if (rem <= slab_size / fract_leftover) 5047 break; 5048 } 5049 5050 return order; 5051 } 5052 5053 static inline int calculate_order(unsigned int size) 5054 { 5055 unsigned int order; 5056 unsigned int min_objects; 5057 unsigned int max_objects; 5058 unsigned int min_order; 5059 5060 min_objects = slub_min_objects; 5061 if (!min_objects) { 5062 /* 5063 * Some architectures will only update present cpus when 5064 * onlining them, so don't trust the number if it's just 1. But 5065 * we also don't want to use nr_cpu_ids always, as on some other 5066 * architectures, there can be many possible cpus, but never 5067 * onlined. Here we compromise between trying to avoid too high 5068 * order on systems that appear larger than they are, and too 5069 * low order on systems that appear smaller than they are. 5070 */ 5071 unsigned int nr_cpus = num_present_cpus(); 5072 if (nr_cpus <= 1) 5073 nr_cpus = nr_cpu_ids; 5074 min_objects = 4 * (fls(nr_cpus) + 1); 5075 } 5076 /* min_objects can't be 0 because get_order(0) is undefined */ 5077 max_objects = max(order_objects(slub_max_order, size), 1U); 5078 min_objects = min(min_objects, max_objects); 5079 5080 min_order = max_t(unsigned int, slub_min_order, 5081 get_order(min_objects * size)); 5082 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 5083 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 5084 5085 /* 5086 * Attempt to find best configuration for a slab. This works by first 5087 * attempting to generate a layout with the best possible configuration 5088 * and backing off gradually. 5089 * 5090 * We start with accepting at most 1/16 waste and try to find the 5091 * smallest order from min_objects-derived/slab_min_order up to 5092 * slab_max_order that will satisfy the constraint. Note that increasing 5093 * the order can only result in same or less fractional waste, not more. 5094 * 5095 * If that fails, we increase the acceptable fraction of waste and try 5096 * again. The last iteration with fraction of 1/2 would effectively 5097 * accept any waste and give us the order determined by min_objects, as 5098 * long as at least single object fits within slab_max_order. 5099 */ 5100 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) { 5101 order = calc_slab_order(size, min_order, slub_max_order, 5102 fraction); 5103 if (order <= slub_max_order) 5104 return order; 5105 } 5106 5107 /* 5108 * Doh this slab cannot be placed using slab_max_order. 5109 */ 5110 order = get_order(size); 5111 if (order <= MAX_PAGE_ORDER) 5112 return order; 5113 return -ENOSYS; 5114 } 5115 5116 static void 5117 init_kmem_cache_node(struct kmem_cache_node *n) 5118 { 5119 n->nr_partial = 0; 5120 spin_lock_init(&n->list_lock); 5121 INIT_LIST_HEAD(&n->partial); 5122 #ifdef CONFIG_SLUB_DEBUG 5123 atomic_long_set(&n->nr_slabs, 0); 5124 atomic_long_set(&n->total_objects, 0); 5125 INIT_LIST_HEAD(&n->full); 5126 #endif 5127 } 5128 5129 #ifndef CONFIG_SLUB_TINY 5130 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 5131 { 5132 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 5133 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * 5134 sizeof(struct kmem_cache_cpu)); 5135 5136 /* 5137 * Must align to double word boundary for the double cmpxchg 5138 * instructions to work; see __pcpu_double_call_return_bool(). 5139 */ 5140 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 5141 2 * sizeof(void *)); 5142 5143 if (!s->cpu_slab) 5144 return 0; 5145 5146 init_kmem_cache_cpus(s); 5147 5148 return 1; 5149 } 5150 #else 5151 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 5152 { 5153 return 1; 5154 } 5155 #endif /* CONFIG_SLUB_TINY */ 5156 5157 static struct kmem_cache *kmem_cache_node; 5158 5159 /* 5160 * No kmalloc_node yet so do it by hand. We know that this is the first 5161 * slab on the node for this slabcache. There are no concurrent accesses 5162 * possible. 5163 * 5164 * Note that this function only works on the kmem_cache_node 5165 * when allocating for the kmem_cache_node. This is used for bootstrapping 5166 * memory on a fresh node that has no slab structures yet. 5167 */ 5168 static void early_kmem_cache_node_alloc(int node) 5169 { 5170 struct slab *slab; 5171 struct kmem_cache_node *n; 5172 5173 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 5174 5175 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 5176 5177 BUG_ON(!slab); 5178 if (slab_nid(slab) != node) { 5179 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 5180 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 5181 } 5182 5183 n = slab->freelist; 5184 BUG_ON(!n); 5185 #ifdef CONFIG_SLUB_DEBUG 5186 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 5187 #endif 5188 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 5189 slab->freelist = get_freepointer(kmem_cache_node, n); 5190 slab->inuse = 1; 5191 kmem_cache_node->node[node] = n; 5192 init_kmem_cache_node(n); 5193 inc_slabs_node(kmem_cache_node, node, slab->objects); 5194 5195 /* 5196 * No locks need to be taken here as it has just been 5197 * initialized and there is no concurrent access. 5198 */ 5199 __add_partial(n, slab, DEACTIVATE_TO_HEAD); 5200 } 5201 5202 static void free_kmem_cache_nodes(struct kmem_cache *s) 5203 { 5204 int node; 5205 struct kmem_cache_node *n; 5206 5207 for_each_kmem_cache_node(s, node, n) { 5208 s->node[node] = NULL; 5209 kmem_cache_free(kmem_cache_node, n); 5210 } 5211 } 5212 5213 void __kmem_cache_release(struct kmem_cache *s) 5214 { 5215 cache_random_seq_destroy(s); 5216 #ifndef CONFIG_SLUB_TINY 5217 free_percpu(s->cpu_slab); 5218 #endif 5219 free_kmem_cache_nodes(s); 5220 } 5221 5222 static int init_kmem_cache_nodes(struct kmem_cache *s) 5223 { 5224 int node; 5225 5226 for_each_node_mask(node, slab_nodes) { 5227 struct kmem_cache_node *n; 5228 5229 if (slab_state == DOWN) { 5230 early_kmem_cache_node_alloc(node); 5231 continue; 5232 } 5233 n = kmem_cache_alloc_node(kmem_cache_node, 5234 GFP_KERNEL, node); 5235 5236 if (!n) { 5237 free_kmem_cache_nodes(s); 5238 return 0; 5239 } 5240 5241 init_kmem_cache_node(n); 5242 s->node[node] = n; 5243 } 5244 return 1; 5245 } 5246 5247 static void set_cpu_partial(struct kmem_cache *s) 5248 { 5249 #ifdef CONFIG_SLUB_CPU_PARTIAL 5250 unsigned int nr_objects; 5251 5252 /* 5253 * cpu_partial determined the maximum number of objects kept in the 5254 * per cpu partial lists of a processor. 5255 * 5256 * Per cpu partial lists mainly contain slabs that just have one 5257 * object freed. If they are used for allocation then they can be 5258 * filled up again with minimal effort. The slab will never hit the 5259 * per node partial lists and therefore no locking will be required. 5260 * 5261 * For backwards compatibility reasons, this is determined as number 5262 * of objects, even though we now limit maximum number of pages, see 5263 * slub_set_cpu_partial() 5264 */ 5265 if (!kmem_cache_has_cpu_partial(s)) 5266 nr_objects = 0; 5267 else if (s->size >= PAGE_SIZE) 5268 nr_objects = 6; 5269 else if (s->size >= 1024) 5270 nr_objects = 24; 5271 else if (s->size >= 256) 5272 nr_objects = 52; 5273 else 5274 nr_objects = 120; 5275 5276 slub_set_cpu_partial(s, nr_objects); 5277 #endif 5278 } 5279 5280 /* 5281 * calculate_sizes() determines the order and the distribution of data within 5282 * a slab object. 5283 */ 5284 static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s) 5285 { 5286 slab_flags_t flags = s->flags; 5287 unsigned int size = s->object_size; 5288 unsigned int order; 5289 5290 /* 5291 * Round up object size to the next word boundary. We can only 5292 * place the free pointer at word boundaries and this determines 5293 * the possible location of the free pointer. 5294 */ 5295 size = ALIGN(size, sizeof(void *)); 5296 5297 #ifdef CONFIG_SLUB_DEBUG 5298 /* 5299 * Determine if we can poison the object itself. If the user of 5300 * the slab may touch the object after free or before allocation 5301 * then we should never poison the object itself. 5302 */ 5303 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 5304 !s->ctor) 5305 s->flags |= __OBJECT_POISON; 5306 else 5307 s->flags &= ~__OBJECT_POISON; 5308 5309 5310 /* 5311 * If we are Redzoning then check if there is some space between the 5312 * end of the object and the free pointer. If not then add an 5313 * additional word to have some bytes to store Redzone information. 5314 */ 5315 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 5316 size += sizeof(void *); 5317 #endif 5318 5319 /* 5320 * With that we have determined the number of bytes in actual use 5321 * by the object and redzoning. 5322 */ 5323 s->inuse = size; 5324 5325 if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) || 5326 (flags & SLAB_POISON) || s->ctor || 5327 ((flags & SLAB_RED_ZONE) && 5328 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) { 5329 /* 5330 * Relocate free pointer after the object if it is not 5331 * permitted to overwrite the first word of the object on 5332 * kmem_cache_free. 5333 * 5334 * This is the case if we do RCU, have a constructor or 5335 * destructor, are poisoning the objects, or are 5336 * redzoning an object smaller than sizeof(void *) or are 5337 * redzoning an object with slub_debug_orig_size() enabled, 5338 * in which case the right redzone may be extended. 5339 * 5340 * The assumption that s->offset >= s->inuse means free 5341 * pointer is outside of the object is used in the 5342 * freeptr_outside_object() function. If that is no 5343 * longer true, the function needs to be modified. 5344 */ 5345 s->offset = size; 5346 size += sizeof(void *); 5347 } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) { 5348 s->offset = args->freeptr_offset; 5349 } else { 5350 /* 5351 * Store freelist pointer near middle of object to keep 5352 * it away from the edges of the object to avoid small 5353 * sized over/underflows from neighboring allocations. 5354 */ 5355 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 5356 } 5357 5358 #ifdef CONFIG_SLUB_DEBUG 5359 if (flags & SLAB_STORE_USER) { 5360 /* 5361 * Need to store information about allocs and frees after 5362 * the object. 5363 */ 5364 size += 2 * sizeof(struct track); 5365 5366 /* Save the original kmalloc request size */ 5367 if (flags & SLAB_KMALLOC) 5368 size += sizeof(unsigned int); 5369 } 5370 #endif 5371 5372 kasan_cache_create(s, &size, &s->flags); 5373 #ifdef CONFIG_SLUB_DEBUG 5374 if (flags & SLAB_RED_ZONE) { 5375 /* 5376 * Add some empty padding so that we can catch 5377 * overwrites from earlier objects rather than let 5378 * tracking information or the free pointer be 5379 * corrupted if a user writes before the start 5380 * of the object. 5381 */ 5382 size += sizeof(void *); 5383 5384 s->red_left_pad = sizeof(void *); 5385 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 5386 size += s->red_left_pad; 5387 } 5388 #endif 5389 5390 /* 5391 * SLUB stores one object immediately after another beginning from 5392 * offset 0. In order to align the objects we have to simply size 5393 * each object to conform to the alignment. 5394 */ 5395 size = ALIGN(size, s->align); 5396 s->size = size; 5397 s->reciprocal_size = reciprocal_value(size); 5398 order = calculate_order(size); 5399 5400 if ((int)order < 0) 5401 return 0; 5402 5403 s->allocflags = __GFP_COMP; 5404 5405 if (s->flags & SLAB_CACHE_DMA) 5406 s->allocflags |= GFP_DMA; 5407 5408 if (s->flags & SLAB_CACHE_DMA32) 5409 s->allocflags |= GFP_DMA32; 5410 5411 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5412 s->allocflags |= __GFP_RECLAIMABLE; 5413 5414 /* 5415 * Determine the number of objects per slab 5416 */ 5417 s->oo = oo_make(order, size); 5418 s->min = oo_make(get_order(size), size); 5419 5420 return !!oo_objects(s->oo); 5421 } 5422 5423 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, 5424 const char *text) 5425 { 5426 #ifdef CONFIG_SLUB_DEBUG 5427 void *addr = slab_address(slab); 5428 void *p; 5429 5430 slab_err(s, slab, text, s->name); 5431 5432 spin_lock(&object_map_lock); 5433 __fill_map(object_map, s, slab); 5434 5435 for_each_object(p, s, addr, slab->objects) { 5436 5437 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { 5438 if (slab_add_kunit_errors()) 5439 continue; 5440 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 5441 print_tracking(s, p); 5442 } 5443 } 5444 spin_unlock(&object_map_lock); 5445 #endif 5446 } 5447 5448 /* 5449 * Attempt to free all partial slabs on a node. 5450 * This is called from __kmem_cache_shutdown(). We must take list_lock 5451 * because sysfs file might still access partial list after the shutdowning. 5452 */ 5453 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 5454 { 5455 LIST_HEAD(discard); 5456 struct slab *slab, *h; 5457 5458 BUG_ON(irqs_disabled()); 5459 spin_lock_irq(&n->list_lock); 5460 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 5461 if (!slab->inuse) { 5462 remove_partial(n, slab); 5463 list_add(&slab->slab_list, &discard); 5464 } else { 5465 list_slab_objects(s, slab, 5466 "Objects remaining in %s on __kmem_cache_shutdown()"); 5467 } 5468 } 5469 spin_unlock_irq(&n->list_lock); 5470 5471 list_for_each_entry_safe(slab, h, &discard, slab_list) 5472 discard_slab(s, slab); 5473 } 5474 5475 bool __kmem_cache_empty(struct kmem_cache *s) 5476 { 5477 int node; 5478 struct kmem_cache_node *n; 5479 5480 for_each_kmem_cache_node(s, node, n) 5481 if (n->nr_partial || node_nr_slabs(n)) 5482 return false; 5483 return true; 5484 } 5485 5486 /* 5487 * Release all resources used by a slab cache. 5488 */ 5489 int __kmem_cache_shutdown(struct kmem_cache *s) 5490 { 5491 int node; 5492 struct kmem_cache_node *n; 5493 5494 flush_all_cpus_locked(s); 5495 /* Attempt to free all objects */ 5496 for_each_kmem_cache_node(s, node, n) { 5497 free_partial(s, n); 5498 if (n->nr_partial || node_nr_slabs(n)) 5499 return 1; 5500 } 5501 return 0; 5502 } 5503 5504 #ifdef CONFIG_PRINTK 5505 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 5506 { 5507 void *base; 5508 int __maybe_unused i; 5509 unsigned int objnr; 5510 void *objp; 5511 void *objp0; 5512 struct kmem_cache *s = slab->slab_cache; 5513 struct track __maybe_unused *trackp; 5514 5515 kpp->kp_ptr = object; 5516 kpp->kp_slab = slab; 5517 kpp->kp_slab_cache = s; 5518 base = slab_address(slab); 5519 objp0 = kasan_reset_tag(object); 5520 #ifdef CONFIG_SLUB_DEBUG 5521 objp = restore_red_left(s, objp0); 5522 #else 5523 objp = objp0; 5524 #endif 5525 objnr = obj_to_index(s, slab, objp); 5526 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 5527 objp = base + s->size * objnr; 5528 kpp->kp_objp = objp; 5529 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 5530 || (objp - base) % s->size) || 5531 !(s->flags & SLAB_STORE_USER)) 5532 return; 5533 #ifdef CONFIG_SLUB_DEBUG 5534 objp = fixup_red_left(s, objp); 5535 trackp = get_track(s, objp, TRACK_ALLOC); 5536 kpp->kp_ret = (void *)trackp->addr; 5537 #ifdef CONFIG_STACKDEPOT 5538 { 5539 depot_stack_handle_t handle; 5540 unsigned long *entries; 5541 unsigned int nr_entries; 5542 5543 handle = READ_ONCE(trackp->handle); 5544 if (handle) { 5545 nr_entries = stack_depot_fetch(handle, &entries); 5546 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5547 kpp->kp_stack[i] = (void *)entries[i]; 5548 } 5549 5550 trackp = get_track(s, objp, TRACK_FREE); 5551 handle = READ_ONCE(trackp->handle); 5552 if (handle) { 5553 nr_entries = stack_depot_fetch(handle, &entries); 5554 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5555 kpp->kp_free_stack[i] = (void *)entries[i]; 5556 } 5557 } 5558 #endif 5559 #endif 5560 } 5561 #endif 5562 5563 /******************************************************************** 5564 * Kmalloc subsystem 5565 *******************************************************************/ 5566 5567 static int __init setup_slub_min_order(char *str) 5568 { 5569 get_option(&str, (int *)&slub_min_order); 5570 5571 if (slub_min_order > slub_max_order) 5572 slub_max_order = slub_min_order; 5573 5574 return 1; 5575 } 5576 5577 __setup("slab_min_order=", setup_slub_min_order); 5578 __setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0); 5579 5580 5581 static int __init setup_slub_max_order(char *str) 5582 { 5583 get_option(&str, (int *)&slub_max_order); 5584 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER); 5585 5586 if (slub_min_order > slub_max_order) 5587 slub_min_order = slub_max_order; 5588 5589 return 1; 5590 } 5591 5592 __setup("slab_max_order=", setup_slub_max_order); 5593 __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0); 5594 5595 static int __init setup_slub_min_objects(char *str) 5596 { 5597 get_option(&str, (int *)&slub_min_objects); 5598 5599 return 1; 5600 } 5601 5602 __setup("slab_min_objects=", setup_slub_min_objects); 5603 __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0); 5604 5605 #ifdef CONFIG_HARDENED_USERCOPY 5606 /* 5607 * Rejects incorrectly sized objects and objects that are to be copied 5608 * to/from userspace but do not fall entirely within the containing slab 5609 * cache's usercopy region. 5610 * 5611 * Returns NULL if check passes, otherwise const char * to name of cache 5612 * to indicate an error. 5613 */ 5614 void __check_heap_object(const void *ptr, unsigned long n, 5615 const struct slab *slab, bool to_user) 5616 { 5617 struct kmem_cache *s; 5618 unsigned int offset; 5619 bool is_kfence = is_kfence_address(ptr); 5620 5621 ptr = kasan_reset_tag(ptr); 5622 5623 /* Find object and usable object size. */ 5624 s = slab->slab_cache; 5625 5626 /* Reject impossible pointers. */ 5627 if (ptr < slab_address(slab)) 5628 usercopy_abort("SLUB object not in SLUB page?!", NULL, 5629 to_user, 0, n); 5630 5631 /* Find offset within object. */ 5632 if (is_kfence) 5633 offset = ptr - kfence_object_start(ptr); 5634 else 5635 offset = (ptr - slab_address(slab)) % s->size; 5636 5637 /* Adjust for redzone and reject if within the redzone. */ 5638 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 5639 if (offset < s->red_left_pad) 5640 usercopy_abort("SLUB object in left red zone", 5641 s->name, to_user, offset, n); 5642 offset -= s->red_left_pad; 5643 } 5644 5645 /* Allow address range falling entirely within usercopy region. */ 5646 if (offset >= s->useroffset && 5647 offset - s->useroffset <= s->usersize && 5648 n <= s->useroffset - offset + s->usersize) 5649 return; 5650 5651 usercopy_abort("SLUB object", s->name, to_user, offset, n); 5652 } 5653 #endif /* CONFIG_HARDENED_USERCOPY */ 5654 5655 #define SHRINK_PROMOTE_MAX 32 5656 5657 /* 5658 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 5659 * up most to the head of the partial lists. New allocations will then 5660 * fill those up and thus they can be removed from the partial lists. 5661 * 5662 * The slabs with the least items are placed last. This results in them 5663 * being allocated from last increasing the chance that the last objects 5664 * are freed in them. 5665 */ 5666 static int __kmem_cache_do_shrink(struct kmem_cache *s) 5667 { 5668 int node; 5669 int i; 5670 struct kmem_cache_node *n; 5671 struct slab *slab; 5672 struct slab *t; 5673 struct list_head discard; 5674 struct list_head promote[SHRINK_PROMOTE_MAX]; 5675 unsigned long flags; 5676 int ret = 0; 5677 5678 for_each_kmem_cache_node(s, node, n) { 5679 INIT_LIST_HEAD(&discard); 5680 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 5681 INIT_LIST_HEAD(promote + i); 5682 5683 spin_lock_irqsave(&n->list_lock, flags); 5684 5685 /* 5686 * Build lists of slabs to discard or promote. 5687 * 5688 * Note that concurrent frees may occur while we hold the 5689 * list_lock. slab->inuse here is the upper limit. 5690 */ 5691 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 5692 int free = slab->objects - slab->inuse; 5693 5694 /* Do not reread slab->inuse */ 5695 barrier(); 5696 5697 /* We do not keep full slabs on the list */ 5698 BUG_ON(free <= 0); 5699 5700 if (free == slab->objects) { 5701 list_move(&slab->slab_list, &discard); 5702 slab_clear_node_partial(slab); 5703 n->nr_partial--; 5704 dec_slabs_node(s, node, slab->objects); 5705 } else if (free <= SHRINK_PROMOTE_MAX) 5706 list_move(&slab->slab_list, promote + free - 1); 5707 } 5708 5709 /* 5710 * Promote the slabs filled up most to the head of the 5711 * partial list. 5712 */ 5713 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 5714 list_splice(promote + i, &n->partial); 5715 5716 spin_unlock_irqrestore(&n->list_lock, flags); 5717 5718 /* Release empty slabs */ 5719 list_for_each_entry_safe(slab, t, &discard, slab_list) 5720 free_slab(s, slab); 5721 5722 if (node_nr_slabs(n)) 5723 ret = 1; 5724 } 5725 5726 return ret; 5727 } 5728 5729 int __kmem_cache_shrink(struct kmem_cache *s) 5730 { 5731 flush_all(s); 5732 return __kmem_cache_do_shrink(s); 5733 } 5734 5735 static int slab_mem_going_offline_callback(void *arg) 5736 { 5737 struct kmem_cache *s; 5738 5739 mutex_lock(&slab_mutex); 5740 list_for_each_entry(s, &slab_caches, list) { 5741 flush_all_cpus_locked(s); 5742 __kmem_cache_do_shrink(s); 5743 } 5744 mutex_unlock(&slab_mutex); 5745 5746 return 0; 5747 } 5748 5749 static void slab_mem_offline_callback(void *arg) 5750 { 5751 struct memory_notify *marg = arg; 5752 int offline_node; 5753 5754 offline_node = marg->status_change_nid_normal; 5755 5756 /* 5757 * If the node still has available memory. we need kmem_cache_node 5758 * for it yet. 5759 */ 5760 if (offline_node < 0) 5761 return; 5762 5763 mutex_lock(&slab_mutex); 5764 node_clear(offline_node, slab_nodes); 5765 /* 5766 * We no longer free kmem_cache_node structures here, as it would be 5767 * racy with all get_node() users, and infeasible to protect them with 5768 * slab_mutex. 5769 */ 5770 mutex_unlock(&slab_mutex); 5771 } 5772 5773 static int slab_mem_going_online_callback(void *arg) 5774 { 5775 struct kmem_cache_node *n; 5776 struct kmem_cache *s; 5777 struct memory_notify *marg = arg; 5778 int nid = marg->status_change_nid_normal; 5779 int ret = 0; 5780 5781 /* 5782 * If the node's memory is already available, then kmem_cache_node is 5783 * already created. Nothing to do. 5784 */ 5785 if (nid < 0) 5786 return 0; 5787 5788 /* 5789 * We are bringing a node online. No memory is available yet. We must 5790 * allocate a kmem_cache_node structure in order to bring the node 5791 * online. 5792 */ 5793 mutex_lock(&slab_mutex); 5794 list_for_each_entry(s, &slab_caches, list) { 5795 /* 5796 * The structure may already exist if the node was previously 5797 * onlined and offlined. 5798 */ 5799 if (get_node(s, nid)) 5800 continue; 5801 /* 5802 * XXX: kmem_cache_alloc_node will fallback to other nodes 5803 * since memory is not yet available from the node that 5804 * is brought up. 5805 */ 5806 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 5807 if (!n) { 5808 ret = -ENOMEM; 5809 goto out; 5810 } 5811 init_kmem_cache_node(n); 5812 s->node[nid] = n; 5813 } 5814 /* 5815 * Any cache created after this point will also have kmem_cache_node 5816 * initialized for the new node. 5817 */ 5818 node_set(nid, slab_nodes); 5819 out: 5820 mutex_unlock(&slab_mutex); 5821 return ret; 5822 } 5823 5824 static int slab_memory_callback(struct notifier_block *self, 5825 unsigned long action, void *arg) 5826 { 5827 int ret = 0; 5828 5829 switch (action) { 5830 case MEM_GOING_ONLINE: 5831 ret = slab_mem_going_online_callback(arg); 5832 break; 5833 case MEM_GOING_OFFLINE: 5834 ret = slab_mem_going_offline_callback(arg); 5835 break; 5836 case MEM_OFFLINE: 5837 case MEM_CANCEL_ONLINE: 5838 slab_mem_offline_callback(arg); 5839 break; 5840 case MEM_ONLINE: 5841 case MEM_CANCEL_OFFLINE: 5842 break; 5843 } 5844 if (ret) 5845 ret = notifier_from_errno(ret); 5846 else 5847 ret = NOTIFY_OK; 5848 return ret; 5849 } 5850 5851 /******************************************************************** 5852 * Basic setup of slabs 5853 *******************************************************************/ 5854 5855 /* 5856 * Used for early kmem_cache structures that were allocated using 5857 * the page allocator. Allocate them properly then fix up the pointers 5858 * that may be pointing to the wrong kmem_cache structure. 5859 */ 5860 5861 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 5862 { 5863 int node; 5864 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 5865 struct kmem_cache_node *n; 5866 5867 memcpy(s, static_cache, kmem_cache->object_size); 5868 5869 /* 5870 * This runs very early, and only the boot processor is supposed to be 5871 * up. Even if it weren't true, IRQs are not up so we couldn't fire 5872 * IPIs around. 5873 */ 5874 __flush_cpu_slab(s, smp_processor_id()); 5875 for_each_kmem_cache_node(s, node, n) { 5876 struct slab *p; 5877 5878 list_for_each_entry(p, &n->partial, slab_list) 5879 p->slab_cache = s; 5880 5881 #ifdef CONFIG_SLUB_DEBUG 5882 list_for_each_entry(p, &n->full, slab_list) 5883 p->slab_cache = s; 5884 #endif 5885 } 5886 list_add(&s->list, &slab_caches); 5887 return s; 5888 } 5889 5890 void __init kmem_cache_init(void) 5891 { 5892 static __initdata struct kmem_cache boot_kmem_cache, 5893 boot_kmem_cache_node; 5894 int node; 5895 5896 if (debug_guardpage_minorder()) 5897 slub_max_order = 0; 5898 5899 /* Print slub debugging pointers without hashing */ 5900 if (__slub_debug_enabled()) 5901 no_hash_pointers_enable(NULL); 5902 5903 kmem_cache_node = &boot_kmem_cache_node; 5904 kmem_cache = &boot_kmem_cache; 5905 5906 /* 5907 * Initialize the nodemask for which we will allocate per node 5908 * structures. Here we don't need taking slab_mutex yet. 5909 */ 5910 for_each_node_state(node, N_NORMAL_MEMORY) 5911 node_set(node, slab_nodes); 5912 5913 create_boot_cache(kmem_cache_node, "kmem_cache_node", 5914 sizeof(struct kmem_cache_node), 5915 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 5916 5917 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 5918 5919 /* Able to allocate the per node structures */ 5920 slab_state = PARTIAL; 5921 5922 create_boot_cache(kmem_cache, "kmem_cache", 5923 offsetof(struct kmem_cache, node) + 5924 nr_node_ids * sizeof(struct kmem_cache_node *), 5925 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 5926 5927 kmem_cache = bootstrap(&boot_kmem_cache); 5928 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 5929 5930 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 5931 setup_kmalloc_cache_index_table(); 5932 create_kmalloc_caches(); 5933 5934 /* Setup random freelists for each cache */ 5935 init_freelist_randomization(); 5936 5937 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 5938 slub_cpu_dead); 5939 5940 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 5941 cache_line_size(), 5942 slub_min_order, slub_max_order, slub_min_objects, 5943 nr_cpu_ids, nr_node_ids); 5944 } 5945 5946 void __init kmem_cache_init_late(void) 5947 { 5948 #ifndef CONFIG_SLUB_TINY 5949 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); 5950 WARN_ON(!flushwq); 5951 #endif 5952 } 5953 5954 struct kmem_cache * 5955 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 5956 slab_flags_t flags, void (*ctor)(void *)) 5957 { 5958 struct kmem_cache *s; 5959 5960 s = find_mergeable(size, align, flags, name, ctor); 5961 if (s) { 5962 if (sysfs_slab_alias(s, name)) 5963 return NULL; 5964 5965 s->refcount++; 5966 5967 /* 5968 * Adjust the object sizes so that we clear 5969 * the complete object on kzalloc. 5970 */ 5971 s->object_size = max(s->object_size, size); 5972 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 5973 } 5974 5975 return s; 5976 } 5977 5978 int do_kmem_cache_create(struct kmem_cache *s, const char *name, 5979 unsigned int size, struct kmem_cache_args *args, 5980 slab_flags_t flags) 5981 { 5982 int err = -EINVAL; 5983 5984 s->name = name; 5985 s->size = s->object_size = size; 5986 5987 s->flags = kmem_cache_flags(flags, s->name); 5988 #ifdef CONFIG_SLAB_FREELIST_HARDENED 5989 s->random = get_random_long(); 5990 #endif 5991 s->align = args->align; 5992 s->ctor = args->ctor; 5993 #ifdef CONFIG_HARDENED_USERCOPY 5994 s->useroffset = args->useroffset; 5995 s->usersize = args->usersize; 5996 #endif 5997 5998 if (!calculate_sizes(args, s)) 5999 goto out; 6000 if (disable_higher_order_debug) { 6001 /* 6002 * Disable debugging flags that store metadata if the min slab 6003 * order increased. 6004 */ 6005 if (get_order(s->size) > get_order(s->object_size)) { 6006 s->flags &= ~DEBUG_METADATA_FLAGS; 6007 s->offset = 0; 6008 if (!calculate_sizes(args, s)) 6009 goto out; 6010 } 6011 } 6012 6013 #ifdef system_has_freelist_aba 6014 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { 6015 /* Enable fast mode */ 6016 s->flags |= __CMPXCHG_DOUBLE; 6017 } 6018 #endif 6019 6020 /* 6021 * The larger the object size is, the more slabs we want on the partial 6022 * list to avoid pounding the page allocator excessively. 6023 */ 6024 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 6025 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 6026 6027 set_cpu_partial(s); 6028 6029 #ifdef CONFIG_NUMA 6030 s->remote_node_defrag_ratio = 1000; 6031 #endif 6032 6033 /* Initialize the pre-computed randomized freelist if slab is up */ 6034 if (slab_state >= UP) { 6035 if (init_cache_random_seq(s)) 6036 goto out; 6037 } 6038 6039 if (!init_kmem_cache_nodes(s)) 6040 goto out; 6041 6042 if (!alloc_kmem_cache_cpus(s)) 6043 goto out; 6044 6045 /* Mutex is not taken during early boot */ 6046 if (slab_state <= UP) { 6047 err = 0; 6048 goto out; 6049 } 6050 6051 err = sysfs_slab_add(s); 6052 if (err) 6053 goto out; 6054 6055 if (s->flags & SLAB_STORE_USER) 6056 debugfs_slab_add(s); 6057 6058 out: 6059 if (err) 6060 __kmem_cache_release(s); 6061 return err; 6062 } 6063 6064 #ifdef SLAB_SUPPORTS_SYSFS 6065 static int count_inuse(struct slab *slab) 6066 { 6067 return slab->inuse; 6068 } 6069 6070 static int count_total(struct slab *slab) 6071 { 6072 return slab->objects; 6073 } 6074 #endif 6075 6076 #ifdef CONFIG_SLUB_DEBUG 6077 static void validate_slab(struct kmem_cache *s, struct slab *slab, 6078 unsigned long *obj_map) 6079 { 6080 void *p; 6081 void *addr = slab_address(slab); 6082 6083 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 6084 return; 6085 6086 /* Now we know that a valid freelist exists */ 6087 __fill_map(obj_map, s, slab); 6088 for_each_object(p, s, addr, slab->objects) { 6089 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 6090 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 6091 6092 if (!check_object(s, slab, p, val)) 6093 break; 6094 } 6095 } 6096 6097 static int validate_slab_node(struct kmem_cache *s, 6098 struct kmem_cache_node *n, unsigned long *obj_map) 6099 { 6100 unsigned long count = 0; 6101 struct slab *slab; 6102 unsigned long flags; 6103 6104 spin_lock_irqsave(&n->list_lock, flags); 6105 6106 list_for_each_entry(slab, &n->partial, slab_list) { 6107 validate_slab(s, slab, obj_map); 6108 count++; 6109 } 6110 if (count != n->nr_partial) { 6111 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 6112 s->name, count, n->nr_partial); 6113 slab_add_kunit_errors(); 6114 } 6115 6116 if (!(s->flags & SLAB_STORE_USER)) 6117 goto out; 6118 6119 list_for_each_entry(slab, &n->full, slab_list) { 6120 validate_slab(s, slab, obj_map); 6121 count++; 6122 } 6123 if (count != node_nr_slabs(n)) { 6124 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 6125 s->name, count, node_nr_slabs(n)); 6126 slab_add_kunit_errors(); 6127 } 6128 6129 out: 6130 spin_unlock_irqrestore(&n->list_lock, flags); 6131 return count; 6132 } 6133 6134 long validate_slab_cache(struct kmem_cache *s) 6135 { 6136 int node; 6137 unsigned long count = 0; 6138 struct kmem_cache_node *n; 6139 unsigned long *obj_map; 6140 6141 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 6142 if (!obj_map) 6143 return -ENOMEM; 6144 6145 flush_all(s); 6146 for_each_kmem_cache_node(s, node, n) 6147 count += validate_slab_node(s, n, obj_map); 6148 6149 bitmap_free(obj_map); 6150 6151 return count; 6152 } 6153 EXPORT_SYMBOL(validate_slab_cache); 6154 6155 #ifdef CONFIG_DEBUG_FS 6156 /* 6157 * Generate lists of code addresses where slabcache objects are allocated 6158 * and freed. 6159 */ 6160 6161 struct location { 6162 depot_stack_handle_t handle; 6163 unsigned long count; 6164 unsigned long addr; 6165 unsigned long waste; 6166 long long sum_time; 6167 long min_time; 6168 long max_time; 6169 long min_pid; 6170 long max_pid; 6171 DECLARE_BITMAP(cpus, NR_CPUS); 6172 nodemask_t nodes; 6173 }; 6174 6175 struct loc_track { 6176 unsigned long max; 6177 unsigned long count; 6178 struct location *loc; 6179 loff_t idx; 6180 }; 6181 6182 static struct dentry *slab_debugfs_root; 6183 6184 static void free_loc_track(struct loc_track *t) 6185 { 6186 if (t->max) 6187 free_pages((unsigned long)t->loc, 6188 get_order(sizeof(struct location) * t->max)); 6189 } 6190 6191 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 6192 { 6193 struct location *l; 6194 int order; 6195 6196 order = get_order(sizeof(struct location) * max); 6197 6198 l = (void *)__get_free_pages(flags, order); 6199 if (!l) 6200 return 0; 6201 6202 if (t->count) { 6203 memcpy(l, t->loc, sizeof(struct location) * t->count); 6204 free_loc_track(t); 6205 } 6206 t->max = max; 6207 t->loc = l; 6208 return 1; 6209 } 6210 6211 static int add_location(struct loc_track *t, struct kmem_cache *s, 6212 const struct track *track, 6213 unsigned int orig_size) 6214 { 6215 long start, end, pos; 6216 struct location *l; 6217 unsigned long caddr, chandle, cwaste; 6218 unsigned long age = jiffies - track->when; 6219 depot_stack_handle_t handle = 0; 6220 unsigned int waste = s->object_size - orig_size; 6221 6222 #ifdef CONFIG_STACKDEPOT 6223 handle = READ_ONCE(track->handle); 6224 #endif 6225 start = -1; 6226 end = t->count; 6227 6228 for ( ; ; ) { 6229 pos = start + (end - start + 1) / 2; 6230 6231 /* 6232 * There is nothing at "end". If we end up there 6233 * we need to add something to before end. 6234 */ 6235 if (pos == end) 6236 break; 6237 6238 l = &t->loc[pos]; 6239 caddr = l->addr; 6240 chandle = l->handle; 6241 cwaste = l->waste; 6242 if ((track->addr == caddr) && (handle == chandle) && 6243 (waste == cwaste)) { 6244 6245 l->count++; 6246 if (track->when) { 6247 l->sum_time += age; 6248 if (age < l->min_time) 6249 l->min_time = age; 6250 if (age > l->max_time) 6251 l->max_time = age; 6252 6253 if (track->pid < l->min_pid) 6254 l->min_pid = track->pid; 6255 if (track->pid > l->max_pid) 6256 l->max_pid = track->pid; 6257 6258 cpumask_set_cpu(track->cpu, 6259 to_cpumask(l->cpus)); 6260 } 6261 node_set(page_to_nid(virt_to_page(track)), l->nodes); 6262 return 1; 6263 } 6264 6265 if (track->addr < caddr) 6266 end = pos; 6267 else if (track->addr == caddr && handle < chandle) 6268 end = pos; 6269 else if (track->addr == caddr && handle == chandle && 6270 waste < cwaste) 6271 end = pos; 6272 else 6273 start = pos; 6274 } 6275 6276 /* 6277 * Not found. Insert new tracking element. 6278 */ 6279 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 6280 return 0; 6281 6282 l = t->loc + pos; 6283 if (pos < t->count) 6284 memmove(l + 1, l, 6285 (t->count - pos) * sizeof(struct location)); 6286 t->count++; 6287 l->count = 1; 6288 l->addr = track->addr; 6289 l->sum_time = age; 6290 l->min_time = age; 6291 l->max_time = age; 6292 l->min_pid = track->pid; 6293 l->max_pid = track->pid; 6294 l->handle = handle; 6295 l->waste = waste; 6296 cpumask_clear(to_cpumask(l->cpus)); 6297 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 6298 nodes_clear(l->nodes); 6299 node_set(page_to_nid(virt_to_page(track)), l->nodes); 6300 return 1; 6301 } 6302 6303 static void process_slab(struct loc_track *t, struct kmem_cache *s, 6304 struct slab *slab, enum track_item alloc, 6305 unsigned long *obj_map) 6306 { 6307 void *addr = slab_address(slab); 6308 bool is_alloc = (alloc == TRACK_ALLOC); 6309 void *p; 6310 6311 __fill_map(obj_map, s, slab); 6312 6313 for_each_object(p, s, addr, slab->objects) 6314 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 6315 add_location(t, s, get_track(s, p, alloc), 6316 is_alloc ? get_orig_size(s, p) : 6317 s->object_size); 6318 } 6319 #endif /* CONFIG_DEBUG_FS */ 6320 #endif /* CONFIG_SLUB_DEBUG */ 6321 6322 #ifdef SLAB_SUPPORTS_SYSFS 6323 enum slab_stat_type { 6324 SL_ALL, /* All slabs */ 6325 SL_PARTIAL, /* Only partially allocated slabs */ 6326 SL_CPU, /* Only slabs used for cpu caches */ 6327 SL_OBJECTS, /* Determine allocated objects not slabs */ 6328 SL_TOTAL /* Determine object capacity not slabs */ 6329 }; 6330 6331 #define SO_ALL (1 << SL_ALL) 6332 #define SO_PARTIAL (1 << SL_PARTIAL) 6333 #define SO_CPU (1 << SL_CPU) 6334 #define SO_OBJECTS (1 << SL_OBJECTS) 6335 #define SO_TOTAL (1 << SL_TOTAL) 6336 6337 static ssize_t show_slab_objects(struct kmem_cache *s, 6338 char *buf, unsigned long flags) 6339 { 6340 unsigned long total = 0; 6341 int node; 6342 int x; 6343 unsigned long *nodes; 6344 int len = 0; 6345 6346 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 6347 if (!nodes) 6348 return -ENOMEM; 6349 6350 if (flags & SO_CPU) { 6351 int cpu; 6352 6353 for_each_possible_cpu(cpu) { 6354 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 6355 cpu); 6356 int node; 6357 struct slab *slab; 6358 6359 slab = READ_ONCE(c->slab); 6360 if (!slab) 6361 continue; 6362 6363 node = slab_nid(slab); 6364 if (flags & SO_TOTAL) 6365 x = slab->objects; 6366 else if (flags & SO_OBJECTS) 6367 x = slab->inuse; 6368 else 6369 x = 1; 6370 6371 total += x; 6372 nodes[node] += x; 6373 6374 #ifdef CONFIG_SLUB_CPU_PARTIAL 6375 slab = slub_percpu_partial_read_once(c); 6376 if (slab) { 6377 node = slab_nid(slab); 6378 if (flags & SO_TOTAL) 6379 WARN_ON_ONCE(1); 6380 else if (flags & SO_OBJECTS) 6381 WARN_ON_ONCE(1); 6382 else 6383 x = data_race(slab->slabs); 6384 total += x; 6385 nodes[node] += x; 6386 } 6387 #endif 6388 } 6389 } 6390 6391 /* 6392 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 6393 * already held which will conflict with an existing lock order: 6394 * 6395 * mem_hotplug_lock->slab_mutex->kernfs_mutex 6396 * 6397 * We don't really need mem_hotplug_lock (to hold off 6398 * slab_mem_going_offline_callback) here because slab's memory hot 6399 * unplug code doesn't destroy the kmem_cache->node[] data. 6400 */ 6401 6402 #ifdef CONFIG_SLUB_DEBUG 6403 if (flags & SO_ALL) { 6404 struct kmem_cache_node *n; 6405 6406 for_each_kmem_cache_node(s, node, n) { 6407 6408 if (flags & SO_TOTAL) 6409 x = node_nr_objs(n); 6410 else if (flags & SO_OBJECTS) 6411 x = node_nr_objs(n) - count_partial(n, count_free); 6412 else 6413 x = node_nr_slabs(n); 6414 total += x; 6415 nodes[node] += x; 6416 } 6417 6418 } else 6419 #endif 6420 if (flags & SO_PARTIAL) { 6421 struct kmem_cache_node *n; 6422 6423 for_each_kmem_cache_node(s, node, n) { 6424 if (flags & SO_TOTAL) 6425 x = count_partial(n, count_total); 6426 else if (flags & SO_OBJECTS) 6427 x = count_partial(n, count_inuse); 6428 else 6429 x = n->nr_partial; 6430 total += x; 6431 nodes[node] += x; 6432 } 6433 } 6434 6435 len += sysfs_emit_at(buf, len, "%lu", total); 6436 #ifdef CONFIG_NUMA 6437 for (node = 0; node < nr_node_ids; node++) { 6438 if (nodes[node]) 6439 len += sysfs_emit_at(buf, len, " N%d=%lu", 6440 node, nodes[node]); 6441 } 6442 #endif 6443 len += sysfs_emit_at(buf, len, "\n"); 6444 kfree(nodes); 6445 6446 return len; 6447 } 6448 6449 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 6450 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 6451 6452 struct slab_attribute { 6453 struct attribute attr; 6454 ssize_t (*show)(struct kmem_cache *s, char *buf); 6455 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 6456 }; 6457 6458 #define SLAB_ATTR_RO(_name) \ 6459 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 6460 6461 #define SLAB_ATTR(_name) \ 6462 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 6463 6464 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 6465 { 6466 return sysfs_emit(buf, "%u\n", s->size); 6467 } 6468 SLAB_ATTR_RO(slab_size); 6469 6470 static ssize_t align_show(struct kmem_cache *s, char *buf) 6471 { 6472 return sysfs_emit(buf, "%u\n", s->align); 6473 } 6474 SLAB_ATTR_RO(align); 6475 6476 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 6477 { 6478 return sysfs_emit(buf, "%u\n", s->object_size); 6479 } 6480 SLAB_ATTR_RO(object_size); 6481 6482 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 6483 { 6484 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 6485 } 6486 SLAB_ATTR_RO(objs_per_slab); 6487 6488 static ssize_t order_show(struct kmem_cache *s, char *buf) 6489 { 6490 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 6491 } 6492 SLAB_ATTR_RO(order); 6493 6494 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 6495 { 6496 return sysfs_emit(buf, "%lu\n", s->min_partial); 6497 } 6498 6499 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 6500 size_t length) 6501 { 6502 unsigned long min; 6503 int err; 6504 6505 err = kstrtoul(buf, 10, &min); 6506 if (err) 6507 return err; 6508 6509 s->min_partial = min; 6510 return length; 6511 } 6512 SLAB_ATTR(min_partial); 6513 6514 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 6515 { 6516 unsigned int nr_partial = 0; 6517 #ifdef CONFIG_SLUB_CPU_PARTIAL 6518 nr_partial = s->cpu_partial; 6519 #endif 6520 6521 return sysfs_emit(buf, "%u\n", nr_partial); 6522 } 6523 6524 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 6525 size_t length) 6526 { 6527 unsigned int objects; 6528 int err; 6529 6530 err = kstrtouint(buf, 10, &objects); 6531 if (err) 6532 return err; 6533 if (objects && !kmem_cache_has_cpu_partial(s)) 6534 return -EINVAL; 6535 6536 slub_set_cpu_partial(s, objects); 6537 flush_all(s); 6538 return length; 6539 } 6540 SLAB_ATTR(cpu_partial); 6541 6542 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 6543 { 6544 if (!s->ctor) 6545 return 0; 6546 return sysfs_emit(buf, "%pS\n", s->ctor); 6547 } 6548 SLAB_ATTR_RO(ctor); 6549 6550 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 6551 { 6552 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 6553 } 6554 SLAB_ATTR_RO(aliases); 6555 6556 static ssize_t partial_show(struct kmem_cache *s, char *buf) 6557 { 6558 return show_slab_objects(s, buf, SO_PARTIAL); 6559 } 6560 SLAB_ATTR_RO(partial); 6561 6562 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 6563 { 6564 return show_slab_objects(s, buf, SO_CPU); 6565 } 6566 SLAB_ATTR_RO(cpu_slabs); 6567 6568 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 6569 { 6570 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 6571 } 6572 SLAB_ATTR_RO(objects_partial); 6573 6574 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 6575 { 6576 int objects = 0; 6577 int slabs = 0; 6578 int cpu __maybe_unused; 6579 int len = 0; 6580 6581 #ifdef CONFIG_SLUB_CPU_PARTIAL 6582 for_each_online_cpu(cpu) { 6583 struct slab *slab; 6584 6585 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6586 6587 if (slab) 6588 slabs += data_race(slab->slabs); 6589 } 6590 #endif 6591 6592 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 6593 objects = (slabs * oo_objects(s->oo)) / 2; 6594 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 6595 6596 #ifdef CONFIG_SLUB_CPU_PARTIAL 6597 for_each_online_cpu(cpu) { 6598 struct slab *slab; 6599 6600 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6601 if (slab) { 6602 slabs = data_race(slab->slabs); 6603 objects = (slabs * oo_objects(s->oo)) / 2; 6604 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 6605 cpu, objects, slabs); 6606 } 6607 } 6608 #endif 6609 len += sysfs_emit_at(buf, len, "\n"); 6610 6611 return len; 6612 } 6613 SLAB_ATTR_RO(slabs_cpu_partial); 6614 6615 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 6616 { 6617 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 6618 } 6619 SLAB_ATTR_RO(reclaim_account); 6620 6621 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 6622 { 6623 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 6624 } 6625 SLAB_ATTR_RO(hwcache_align); 6626 6627 #ifdef CONFIG_ZONE_DMA 6628 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 6629 { 6630 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 6631 } 6632 SLAB_ATTR_RO(cache_dma); 6633 #endif 6634 6635 #ifdef CONFIG_HARDENED_USERCOPY 6636 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 6637 { 6638 return sysfs_emit(buf, "%u\n", s->usersize); 6639 } 6640 SLAB_ATTR_RO(usersize); 6641 #endif 6642 6643 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 6644 { 6645 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 6646 } 6647 SLAB_ATTR_RO(destroy_by_rcu); 6648 6649 #ifdef CONFIG_SLUB_DEBUG 6650 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 6651 { 6652 return show_slab_objects(s, buf, SO_ALL); 6653 } 6654 SLAB_ATTR_RO(slabs); 6655 6656 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 6657 { 6658 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 6659 } 6660 SLAB_ATTR_RO(total_objects); 6661 6662 static ssize_t objects_show(struct kmem_cache *s, char *buf) 6663 { 6664 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 6665 } 6666 SLAB_ATTR_RO(objects); 6667 6668 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 6669 { 6670 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 6671 } 6672 SLAB_ATTR_RO(sanity_checks); 6673 6674 static ssize_t trace_show(struct kmem_cache *s, char *buf) 6675 { 6676 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 6677 } 6678 SLAB_ATTR_RO(trace); 6679 6680 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 6681 { 6682 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 6683 } 6684 6685 SLAB_ATTR_RO(red_zone); 6686 6687 static ssize_t poison_show(struct kmem_cache *s, char *buf) 6688 { 6689 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 6690 } 6691 6692 SLAB_ATTR_RO(poison); 6693 6694 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 6695 { 6696 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 6697 } 6698 6699 SLAB_ATTR_RO(store_user); 6700 6701 static ssize_t validate_show(struct kmem_cache *s, char *buf) 6702 { 6703 return 0; 6704 } 6705 6706 static ssize_t validate_store(struct kmem_cache *s, 6707 const char *buf, size_t length) 6708 { 6709 int ret = -EINVAL; 6710 6711 if (buf[0] == '1' && kmem_cache_debug(s)) { 6712 ret = validate_slab_cache(s); 6713 if (ret >= 0) 6714 ret = length; 6715 } 6716 return ret; 6717 } 6718 SLAB_ATTR(validate); 6719 6720 #endif /* CONFIG_SLUB_DEBUG */ 6721 6722 #ifdef CONFIG_FAILSLAB 6723 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 6724 { 6725 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 6726 } 6727 6728 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 6729 size_t length) 6730 { 6731 if (s->refcount > 1) 6732 return -EINVAL; 6733 6734 if (buf[0] == '1') 6735 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); 6736 else 6737 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); 6738 6739 return length; 6740 } 6741 SLAB_ATTR(failslab); 6742 #endif 6743 6744 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 6745 { 6746 return 0; 6747 } 6748 6749 static ssize_t shrink_store(struct kmem_cache *s, 6750 const char *buf, size_t length) 6751 { 6752 if (buf[0] == '1') 6753 kmem_cache_shrink(s); 6754 else 6755 return -EINVAL; 6756 return length; 6757 } 6758 SLAB_ATTR(shrink); 6759 6760 #ifdef CONFIG_NUMA 6761 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 6762 { 6763 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 6764 } 6765 6766 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 6767 const char *buf, size_t length) 6768 { 6769 unsigned int ratio; 6770 int err; 6771 6772 err = kstrtouint(buf, 10, &ratio); 6773 if (err) 6774 return err; 6775 if (ratio > 100) 6776 return -ERANGE; 6777 6778 s->remote_node_defrag_ratio = ratio * 10; 6779 6780 return length; 6781 } 6782 SLAB_ATTR(remote_node_defrag_ratio); 6783 #endif 6784 6785 #ifdef CONFIG_SLUB_STATS 6786 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 6787 { 6788 unsigned long sum = 0; 6789 int cpu; 6790 int len = 0; 6791 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 6792 6793 if (!data) 6794 return -ENOMEM; 6795 6796 for_each_online_cpu(cpu) { 6797 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 6798 6799 data[cpu] = x; 6800 sum += x; 6801 } 6802 6803 len += sysfs_emit_at(buf, len, "%lu", sum); 6804 6805 #ifdef CONFIG_SMP 6806 for_each_online_cpu(cpu) { 6807 if (data[cpu]) 6808 len += sysfs_emit_at(buf, len, " C%d=%u", 6809 cpu, data[cpu]); 6810 } 6811 #endif 6812 kfree(data); 6813 len += sysfs_emit_at(buf, len, "\n"); 6814 6815 return len; 6816 } 6817 6818 static void clear_stat(struct kmem_cache *s, enum stat_item si) 6819 { 6820 int cpu; 6821 6822 for_each_online_cpu(cpu) 6823 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 6824 } 6825 6826 #define STAT_ATTR(si, text) \ 6827 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 6828 { \ 6829 return show_stat(s, buf, si); \ 6830 } \ 6831 static ssize_t text##_store(struct kmem_cache *s, \ 6832 const char *buf, size_t length) \ 6833 { \ 6834 if (buf[0] != '0') \ 6835 return -EINVAL; \ 6836 clear_stat(s, si); \ 6837 return length; \ 6838 } \ 6839 SLAB_ATTR(text); \ 6840 6841 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 6842 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 6843 STAT_ATTR(FREE_FASTPATH, free_fastpath); 6844 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 6845 STAT_ATTR(FREE_FROZEN, free_frozen); 6846 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 6847 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 6848 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 6849 STAT_ATTR(ALLOC_SLAB, alloc_slab); 6850 STAT_ATTR(ALLOC_REFILL, alloc_refill); 6851 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 6852 STAT_ATTR(FREE_SLAB, free_slab); 6853 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 6854 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 6855 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 6856 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 6857 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 6858 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 6859 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 6860 STAT_ATTR(ORDER_FALLBACK, order_fallback); 6861 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 6862 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 6863 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 6864 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 6865 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 6866 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 6867 #endif /* CONFIG_SLUB_STATS */ 6868 6869 #ifdef CONFIG_KFENCE 6870 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) 6871 { 6872 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); 6873 } 6874 6875 static ssize_t skip_kfence_store(struct kmem_cache *s, 6876 const char *buf, size_t length) 6877 { 6878 int ret = length; 6879 6880 if (buf[0] == '0') 6881 s->flags &= ~SLAB_SKIP_KFENCE; 6882 else if (buf[0] == '1') 6883 s->flags |= SLAB_SKIP_KFENCE; 6884 else 6885 ret = -EINVAL; 6886 6887 return ret; 6888 } 6889 SLAB_ATTR(skip_kfence); 6890 #endif 6891 6892 static struct attribute *slab_attrs[] = { 6893 &slab_size_attr.attr, 6894 &object_size_attr.attr, 6895 &objs_per_slab_attr.attr, 6896 &order_attr.attr, 6897 &min_partial_attr.attr, 6898 &cpu_partial_attr.attr, 6899 &objects_partial_attr.attr, 6900 &partial_attr.attr, 6901 &cpu_slabs_attr.attr, 6902 &ctor_attr.attr, 6903 &aliases_attr.attr, 6904 &align_attr.attr, 6905 &hwcache_align_attr.attr, 6906 &reclaim_account_attr.attr, 6907 &destroy_by_rcu_attr.attr, 6908 &shrink_attr.attr, 6909 &slabs_cpu_partial_attr.attr, 6910 #ifdef CONFIG_SLUB_DEBUG 6911 &total_objects_attr.attr, 6912 &objects_attr.attr, 6913 &slabs_attr.attr, 6914 &sanity_checks_attr.attr, 6915 &trace_attr.attr, 6916 &red_zone_attr.attr, 6917 &poison_attr.attr, 6918 &store_user_attr.attr, 6919 &validate_attr.attr, 6920 #endif 6921 #ifdef CONFIG_ZONE_DMA 6922 &cache_dma_attr.attr, 6923 #endif 6924 #ifdef CONFIG_NUMA 6925 &remote_node_defrag_ratio_attr.attr, 6926 #endif 6927 #ifdef CONFIG_SLUB_STATS 6928 &alloc_fastpath_attr.attr, 6929 &alloc_slowpath_attr.attr, 6930 &free_fastpath_attr.attr, 6931 &free_slowpath_attr.attr, 6932 &free_frozen_attr.attr, 6933 &free_add_partial_attr.attr, 6934 &free_remove_partial_attr.attr, 6935 &alloc_from_partial_attr.attr, 6936 &alloc_slab_attr.attr, 6937 &alloc_refill_attr.attr, 6938 &alloc_node_mismatch_attr.attr, 6939 &free_slab_attr.attr, 6940 &cpuslab_flush_attr.attr, 6941 &deactivate_full_attr.attr, 6942 &deactivate_empty_attr.attr, 6943 &deactivate_to_head_attr.attr, 6944 &deactivate_to_tail_attr.attr, 6945 &deactivate_remote_frees_attr.attr, 6946 &deactivate_bypass_attr.attr, 6947 &order_fallback_attr.attr, 6948 &cmpxchg_double_fail_attr.attr, 6949 &cmpxchg_double_cpu_fail_attr.attr, 6950 &cpu_partial_alloc_attr.attr, 6951 &cpu_partial_free_attr.attr, 6952 &cpu_partial_node_attr.attr, 6953 &cpu_partial_drain_attr.attr, 6954 #endif 6955 #ifdef CONFIG_FAILSLAB 6956 &failslab_attr.attr, 6957 #endif 6958 #ifdef CONFIG_HARDENED_USERCOPY 6959 &usersize_attr.attr, 6960 #endif 6961 #ifdef CONFIG_KFENCE 6962 &skip_kfence_attr.attr, 6963 #endif 6964 6965 NULL 6966 }; 6967 6968 static const struct attribute_group slab_attr_group = { 6969 .attrs = slab_attrs, 6970 }; 6971 6972 static ssize_t slab_attr_show(struct kobject *kobj, 6973 struct attribute *attr, 6974 char *buf) 6975 { 6976 struct slab_attribute *attribute; 6977 struct kmem_cache *s; 6978 6979 attribute = to_slab_attr(attr); 6980 s = to_slab(kobj); 6981 6982 if (!attribute->show) 6983 return -EIO; 6984 6985 return attribute->show(s, buf); 6986 } 6987 6988 static ssize_t slab_attr_store(struct kobject *kobj, 6989 struct attribute *attr, 6990 const char *buf, size_t len) 6991 { 6992 struct slab_attribute *attribute; 6993 struct kmem_cache *s; 6994 6995 attribute = to_slab_attr(attr); 6996 s = to_slab(kobj); 6997 6998 if (!attribute->store) 6999 return -EIO; 7000 7001 return attribute->store(s, buf, len); 7002 } 7003 7004 static void kmem_cache_release(struct kobject *k) 7005 { 7006 slab_kmem_cache_release(to_slab(k)); 7007 } 7008 7009 static const struct sysfs_ops slab_sysfs_ops = { 7010 .show = slab_attr_show, 7011 .store = slab_attr_store, 7012 }; 7013 7014 static const struct kobj_type slab_ktype = { 7015 .sysfs_ops = &slab_sysfs_ops, 7016 .release = kmem_cache_release, 7017 }; 7018 7019 static struct kset *slab_kset; 7020 7021 static inline struct kset *cache_kset(struct kmem_cache *s) 7022 { 7023 return slab_kset; 7024 } 7025 7026 #define ID_STR_LENGTH 32 7027 7028 /* Create a unique string id for a slab cache: 7029 * 7030 * Format :[flags-]size 7031 */ 7032 static char *create_unique_id(struct kmem_cache *s) 7033 { 7034 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 7035 char *p = name; 7036 7037 if (!name) 7038 return ERR_PTR(-ENOMEM); 7039 7040 *p++ = ':'; 7041 /* 7042 * First flags affecting slabcache operations. We will only 7043 * get here for aliasable slabs so we do not need to support 7044 * too many flags. The flags here must cover all flags that 7045 * are matched during merging to guarantee that the id is 7046 * unique. 7047 */ 7048 if (s->flags & SLAB_CACHE_DMA) 7049 *p++ = 'd'; 7050 if (s->flags & SLAB_CACHE_DMA32) 7051 *p++ = 'D'; 7052 if (s->flags & SLAB_RECLAIM_ACCOUNT) 7053 *p++ = 'a'; 7054 if (s->flags & SLAB_CONSISTENCY_CHECKS) 7055 *p++ = 'F'; 7056 if (s->flags & SLAB_ACCOUNT) 7057 *p++ = 'A'; 7058 if (p != name + 1) 7059 *p++ = '-'; 7060 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); 7061 7062 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { 7063 kfree(name); 7064 return ERR_PTR(-EINVAL); 7065 } 7066 kmsan_unpoison_memory(name, p - name); 7067 return name; 7068 } 7069 7070 static int sysfs_slab_add(struct kmem_cache *s) 7071 { 7072 int err; 7073 const char *name; 7074 struct kset *kset = cache_kset(s); 7075 int unmergeable = slab_unmergeable(s); 7076 7077 if (!unmergeable && disable_higher_order_debug && 7078 (slub_debug & DEBUG_METADATA_FLAGS)) 7079 unmergeable = 1; 7080 7081 if (unmergeable) { 7082 /* 7083 * Slabcache can never be merged so we can use the name proper. 7084 * This is typically the case for debug situations. In that 7085 * case we can catch duplicate names easily. 7086 */ 7087 sysfs_remove_link(&slab_kset->kobj, s->name); 7088 name = s->name; 7089 } else { 7090 /* 7091 * Create a unique name for the slab as a target 7092 * for the symlinks. 7093 */ 7094 name = create_unique_id(s); 7095 if (IS_ERR(name)) 7096 return PTR_ERR(name); 7097 } 7098 7099 s->kobj.kset = kset; 7100 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 7101 if (err) 7102 goto out; 7103 7104 err = sysfs_create_group(&s->kobj, &slab_attr_group); 7105 if (err) 7106 goto out_del_kobj; 7107 7108 if (!unmergeable) { 7109 /* Setup first alias */ 7110 sysfs_slab_alias(s, s->name); 7111 } 7112 out: 7113 if (!unmergeable) 7114 kfree(name); 7115 return err; 7116 out_del_kobj: 7117 kobject_del(&s->kobj); 7118 goto out; 7119 } 7120 7121 void sysfs_slab_unlink(struct kmem_cache *s) 7122 { 7123 kobject_del(&s->kobj); 7124 } 7125 7126 void sysfs_slab_release(struct kmem_cache *s) 7127 { 7128 kobject_put(&s->kobj); 7129 } 7130 7131 /* 7132 * Need to buffer aliases during bootup until sysfs becomes 7133 * available lest we lose that information. 7134 */ 7135 struct saved_alias { 7136 struct kmem_cache *s; 7137 const char *name; 7138 struct saved_alias *next; 7139 }; 7140 7141 static struct saved_alias *alias_list; 7142 7143 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 7144 { 7145 struct saved_alias *al; 7146 7147 if (slab_state == FULL) { 7148 /* 7149 * If we have a leftover link then remove it. 7150 */ 7151 sysfs_remove_link(&slab_kset->kobj, name); 7152 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 7153 } 7154 7155 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 7156 if (!al) 7157 return -ENOMEM; 7158 7159 al->s = s; 7160 al->name = name; 7161 al->next = alias_list; 7162 alias_list = al; 7163 kmsan_unpoison_memory(al, sizeof(*al)); 7164 return 0; 7165 } 7166 7167 static int __init slab_sysfs_init(void) 7168 { 7169 struct kmem_cache *s; 7170 int err; 7171 7172 mutex_lock(&slab_mutex); 7173 7174 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 7175 if (!slab_kset) { 7176 mutex_unlock(&slab_mutex); 7177 pr_err("Cannot register slab subsystem.\n"); 7178 return -ENOMEM; 7179 } 7180 7181 slab_state = FULL; 7182 7183 list_for_each_entry(s, &slab_caches, list) { 7184 err = sysfs_slab_add(s); 7185 if (err) 7186 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 7187 s->name); 7188 } 7189 7190 while (alias_list) { 7191 struct saved_alias *al = alias_list; 7192 7193 alias_list = alias_list->next; 7194 err = sysfs_slab_alias(al->s, al->name); 7195 if (err) 7196 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 7197 al->name); 7198 kfree(al); 7199 } 7200 7201 mutex_unlock(&slab_mutex); 7202 return 0; 7203 } 7204 late_initcall(slab_sysfs_init); 7205 #endif /* SLAB_SUPPORTS_SYSFS */ 7206 7207 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 7208 static int slab_debugfs_show(struct seq_file *seq, void *v) 7209 { 7210 struct loc_track *t = seq->private; 7211 struct location *l; 7212 unsigned long idx; 7213 7214 idx = (unsigned long) t->idx; 7215 if (idx < t->count) { 7216 l = &t->loc[idx]; 7217 7218 seq_printf(seq, "%7ld ", l->count); 7219 7220 if (l->addr) 7221 seq_printf(seq, "%pS", (void *)l->addr); 7222 else 7223 seq_puts(seq, "<not-available>"); 7224 7225 if (l->waste) 7226 seq_printf(seq, " waste=%lu/%lu", 7227 l->count * l->waste, l->waste); 7228 7229 if (l->sum_time != l->min_time) { 7230 seq_printf(seq, " age=%ld/%llu/%ld", 7231 l->min_time, div_u64(l->sum_time, l->count), 7232 l->max_time); 7233 } else 7234 seq_printf(seq, " age=%ld", l->min_time); 7235 7236 if (l->min_pid != l->max_pid) 7237 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 7238 else 7239 seq_printf(seq, " pid=%ld", 7240 l->min_pid); 7241 7242 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 7243 seq_printf(seq, " cpus=%*pbl", 7244 cpumask_pr_args(to_cpumask(l->cpus))); 7245 7246 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 7247 seq_printf(seq, " nodes=%*pbl", 7248 nodemask_pr_args(&l->nodes)); 7249 7250 #ifdef CONFIG_STACKDEPOT 7251 { 7252 depot_stack_handle_t handle; 7253 unsigned long *entries; 7254 unsigned int nr_entries, j; 7255 7256 handle = READ_ONCE(l->handle); 7257 if (handle) { 7258 nr_entries = stack_depot_fetch(handle, &entries); 7259 seq_puts(seq, "\n"); 7260 for (j = 0; j < nr_entries; j++) 7261 seq_printf(seq, " %pS\n", (void *)entries[j]); 7262 } 7263 } 7264 #endif 7265 seq_puts(seq, "\n"); 7266 } 7267 7268 if (!idx && !t->count) 7269 seq_puts(seq, "No data\n"); 7270 7271 return 0; 7272 } 7273 7274 static void slab_debugfs_stop(struct seq_file *seq, void *v) 7275 { 7276 } 7277 7278 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 7279 { 7280 struct loc_track *t = seq->private; 7281 7282 t->idx = ++(*ppos); 7283 if (*ppos <= t->count) 7284 return ppos; 7285 7286 return NULL; 7287 } 7288 7289 static int cmp_loc_by_count(const void *a, const void *b, const void *data) 7290 { 7291 struct location *loc1 = (struct location *)a; 7292 struct location *loc2 = (struct location *)b; 7293 7294 if (loc1->count > loc2->count) 7295 return -1; 7296 else 7297 return 1; 7298 } 7299 7300 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 7301 { 7302 struct loc_track *t = seq->private; 7303 7304 t->idx = *ppos; 7305 return ppos; 7306 } 7307 7308 static const struct seq_operations slab_debugfs_sops = { 7309 .start = slab_debugfs_start, 7310 .next = slab_debugfs_next, 7311 .stop = slab_debugfs_stop, 7312 .show = slab_debugfs_show, 7313 }; 7314 7315 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 7316 { 7317 7318 struct kmem_cache_node *n; 7319 enum track_item alloc; 7320 int node; 7321 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 7322 sizeof(struct loc_track)); 7323 struct kmem_cache *s = file_inode(filep)->i_private; 7324 unsigned long *obj_map; 7325 7326 if (!t) 7327 return -ENOMEM; 7328 7329 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 7330 if (!obj_map) { 7331 seq_release_private(inode, filep); 7332 return -ENOMEM; 7333 } 7334 7335 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 7336 alloc = TRACK_ALLOC; 7337 else 7338 alloc = TRACK_FREE; 7339 7340 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 7341 bitmap_free(obj_map); 7342 seq_release_private(inode, filep); 7343 return -ENOMEM; 7344 } 7345 7346 for_each_kmem_cache_node(s, node, n) { 7347 unsigned long flags; 7348 struct slab *slab; 7349 7350 if (!node_nr_slabs(n)) 7351 continue; 7352 7353 spin_lock_irqsave(&n->list_lock, flags); 7354 list_for_each_entry(slab, &n->partial, slab_list) 7355 process_slab(t, s, slab, alloc, obj_map); 7356 list_for_each_entry(slab, &n->full, slab_list) 7357 process_slab(t, s, slab, alloc, obj_map); 7358 spin_unlock_irqrestore(&n->list_lock, flags); 7359 } 7360 7361 /* Sort locations by count */ 7362 sort_r(t->loc, t->count, sizeof(struct location), 7363 cmp_loc_by_count, NULL, NULL); 7364 7365 bitmap_free(obj_map); 7366 return 0; 7367 } 7368 7369 static int slab_debug_trace_release(struct inode *inode, struct file *file) 7370 { 7371 struct seq_file *seq = file->private_data; 7372 struct loc_track *t = seq->private; 7373 7374 free_loc_track(t); 7375 return seq_release_private(inode, file); 7376 } 7377 7378 static const struct file_operations slab_debugfs_fops = { 7379 .open = slab_debug_trace_open, 7380 .read = seq_read, 7381 .llseek = seq_lseek, 7382 .release = slab_debug_trace_release, 7383 }; 7384 7385 static void debugfs_slab_add(struct kmem_cache *s) 7386 { 7387 struct dentry *slab_cache_dir; 7388 7389 if (unlikely(!slab_debugfs_root)) 7390 return; 7391 7392 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 7393 7394 debugfs_create_file("alloc_traces", 0400, 7395 slab_cache_dir, s, &slab_debugfs_fops); 7396 7397 debugfs_create_file("free_traces", 0400, 7398 slab_cache_dir, s, &slab_debugfs_fops); 7399 } 7400 7401 void debugfs_slab_release(struct kmem_cache *s) 7402 { 7403 debugfs_lookup_and_remove(s->name, slab_debugfs_root); 7404 } 7405 7406 static int __init slab_debugfs_init(void) 7407 { 7408 struct kmem_cache *s; 7409 7410 slab_debugfs_root = debugfs_create_dir("slab", NULL); 7411 7412 list_for_each_entry(s, &slab_caches, list) 7413 if (s->flags & SLAB_STORE_USER) 7414 debugfs_slab_add(s); 7415 7416 return 0; 7417 7418 } 7419 __initcall(slab_debugfs_init); 7420 #endif 7421 /* 7422 * The /proc/slabinfo ABI 7423 */ 7424 #ifdef CONFIG_SLUB_DEBUG 7425 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 7426 { 7427 unsigned long nr_slabs = 0; 7428 unsigned long nr_objs = 0; 7429 unsigned long nr_free = 0; 7430 int node; 7431 struct kmem_cache_node *n; 7432 7433 for_each_kmem_cache_node(s, node, n) { 7434 nr_slabs += node_nr_slabs(n); 7435 nr_objs += node_nr_objs(n); 7436 nr_free += count_partial_free_approx(n); 7437 } 7438 7439 sinfo->active_objs = nr_objs - nr_free; 7440 sinfo->num_objs = nr_objs; 7441 sinfo->active_slabs = nr_slabs; 7442 sinfo->num_slabs = nr_slabs; 7443 sinfo->objects_per_slab = oo_objects(s->oo); 7444 sinfo->cache_order = oo_order(s->oo); 7445 } 7446 #endif /* CONFIG_SLUB_DEBUG */ 7447