// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* mm_account_reclaimed_pages() */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/kmemleak.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/sort.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
 *   4. slab_lock(slab) (Only on some arches)
 *   5. object_map_lock (Only for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *   Also synchronizes memory hotplug callbacks.
 *
 *   slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
 *
 *   The slab_lock is only used on arches that do not have the ability
 *   to do a cmpxchg_double. It only protects:
 *
 *	A. slab->freelist	-> List of free objects in a slab
 *	B. slab->inuse		-> Number of objects in use
 *	C. slab->objects	-> Number of objects in slab
 *	D. slab->frozen		-> frozen state
 *
 *   Frozen slabs
 *
 *   If a slab is frozen then it is exempt from list management. It is
 *   the cpu slab which is actively allocated from by the processor that
 *   froze it and it is not on any list. The processor that froze the
 *   slab is the one who can perform list operations on the slab. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   slab's freelist.
 *
 *   CPU partial slabs
 *
 *   The partially empty slabs cached on the CPU partial list are used
 *   for performance reasons, which speeds up the allocation process.
 *   These slabs are not frozen, but are also exempt from list management,
 *   by clearing the PG_workingset flag when moving out of the node
 *   partial list. Please see __slab_free() for more details.
 *
 * To sum up, the current scheme is:
 * - node partial slab: PG_Workingset && !frozen
 * - cpu partial slab:  !PG_Workingset && !frozen
 * - cpu slab:          !PG_Workingset && frozen
 * - full slab:         !PG_Workingset && !frozen
 *
 *   list_lock
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists, nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   For debug caches, all allocations are forced to go through a list_lock
 *   protected region to serialize against concurrent validation.
 *
 *   cpu_slab->lock local lock
 *
 *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters. This is a percpu structure manipulated only by
 *   the local cpu, so the lock protects against being preempted or interrupted
 *   by an irq. Fast path operations rely on lockless operations instead.
 *
 *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption
 *   which means the lockless fastpath cannot be used as it might interfere with
 *   an in-progress slow path operation. In this case the local lock is always
 *   taken but it still utilizes the freelist for the common operations.
 *
 *   lockless fastpaths
 *
 *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 *   are fully lockless when satisfied from the percpu slab (and when
 *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
 *   They also don't disable preemption or migration or irqs. They rely on
 *   the transaction id (tid) field to detect being preempted or moved to
 *   another cpu.
 *
 *   irq, preemption, migration considerations
 *
 *   Interrupts are disabled as part of list_lock or local_lock operations, or
 *   around the slab_lock operation, in order to make the slab allocator safe
 *   to use in the context of an irq.
 *
 *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
 *   doesn't have to be revalidated in each section protected by the local lock.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * slab->frozen		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */
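
/*
 * Illustrative sketch only (not used by the allocator): the state table
 * above could be read back from a slab roughly like this, assuming the
 * PG_workingset bit is tested via folio_test_workingset(). Hypothetical
 * helper, shown purely for documentation:
 *
 *	static const char *slab_state_name(struct slab *slab)
 *	{
 *		if (slab->frozen)
 *			return "cpu slab";
 *		if (folio_test_workingset(slab_folio(slab)))
 *			return "node partial slab";
 *		return "cpu partial or full slab";
 *	}
 */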

/*
 * We could simply use migrate_disable()/enable() but as long as it's a
 * function call even on !PREEMPT_RT, use inline preempt_disable() there.
 */
#ifndef CONFIG_PREEMPT_RT
#define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
#define USE_LOCKLESS_FAST_PATH()	(true)
#else
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
#define USE_LOCKLESS_FAST_PATH()	(false)
#endif
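
/*
 * Usage sketch for the wrappers above (illustrative only): callers pin
 * the percpu structure like this, with preemption disabled on
 * !PREEMPT_RT and only migration disabled on PREEMPT_RT:
 *
 *	struct kmem_cache_cpu *c;
 *
 *	c = slub_get_cpu_ptr(s->cpu_slab);
 *	... operate on c, possibly under local_lock(&s->cpu_slab->lock) ...
 *	slub_put_cpu_ptr(s->cpu_slab);
 */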

#ifndef CONFIG_SLUB_TINY
#define __fastpath_inline __always_inline
#else
#define __fastpath_inline
#endif

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif	/* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_NUMA
static DEFINE_STATIC_KEY_FALSE(strict_numa);
#endif

/* Structure holding parameters for get_partial() call chain */
struct partial_context {
	gfp_t flags;
	unsigned int orig_size;
	void *object;
};

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

#ifndef CONFIG_SLUB_TINY
/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10
#else
#define MIN_PARTIAL 0
#define MAX_PARTIAL 0
#endif

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slab_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
/* Use cmpxchg_double */

#ifdef system_has_freelist_aba
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
#else
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef SLAB_SUPPORTS_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS
};

#ifndef CONFIG_SLUB_TINY
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	union {
		struct {
			void **freelist;	/* Pointer to next available object */
			unsigned long tid;	/* Globally unique transaction id */
		};
		freelist_aba_t freelist_tid;
	};
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
};
#endif /* CONFIG_SLUB_TINY */
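
/*
 * Illustrative sketch (hypothetical, heavily simplified from the real
 * fastpath): freelist and tid overlay freelist_tid so that both can be
 * swapped in a single this_cpu_cmpxchg_double()-style operation. A tid
 * mismatch means the task was preempted or migrated mid-transaction,
 * and the whole transaction is retried:
 *
 *	object = c->freelist;
 *	tid = c->tid;
 *	next = get_freepointer_safe(s, object);
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid, next, next_tid(tid)))
 *		goto redo;	// raced with another transaction: retry
 */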

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

static inline
void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_add(s->cpu_slab->stat[si], v);
#endif
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

#ifndef CONFIG_SLUB_TINY
/*
 * Workqueue used for flush_cpu_slab().
 */
static struct workqueue_struct *flushwq;
#endif

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
					    void *ptr, unsigned long ptr_addr)
{
	unsigned long encoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
#else
	encoded = (unsigned long)ptr;
#endif
	return (freeptr_t){.v = encoded};
}

static inline void *freelist_ptr_decode(const struct kmem_cache *s,
					freeptr_t ptr, unsigned long ptr_addr)
{
	void *decoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
#else
	decoded = (void *)ptr.v;
#endif
	return decoded;
}
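
/*
 * Round-trip sketch of the hardening scheme above (illustrative):
 * because XOR is its own inverse, decode(encode(x)) == x as long as the
 * same s->random and ptr_addr are used:
 *
 *	freeptr_t enc = freelist_ptr_encode(s, obj, addr);
 *	WARN_ON(freelist_ptr_decode(s, enc, addr) != obj);
 *
 * swab() mixes the mostly-constant high bits of the storage address
 * into the low bytes of the stored value, making encoded pointers
 * harder to forge with a partial overwrite.
 */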

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	unsigned long ptr_addr;
	freeptr_t p;

	object = kasan_reset_tag(object);
	ptr_addr = (unsigned long)object + s->offset;
	p = *(freeptr_t *)(ptr_addr);
	return freelist_ptr_decode(s, p, ptr_addr);
}

#ifndef CONFIG_SLUB_TINY
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetchw(object + s->offset);
}
#endif

/*
 * When running under KMSAN, get_freepointer_safe() may return an uninitialized
 * pointer value in the case the current thread loses the race for the next
 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
 * slab_alloc_node() will fail, so the uninitialized value won't be used, but
 * KMSAN will still check all arguments of cmpxchg because of imperfect
 * handling of inline assembly.
 * To work around this problem, we apply __no_kmsan_checks to ensure that
 * get_freepointer_safe() returns initialized memory.
 */
__no_kmsan_checks
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	freeptr_t p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
	return freelist_ptr_decode(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
}
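
/*
 * Chaining sketch (illustrative, simplified from the slab setup path):
 * a fresh slab's objects are linked into a singly linked freelist with
 * set_freepointer(), each object storing the (possibly obfuscated)
 * address of the next one at object + s->offset:
 *
 *	void *start = fixup_red_left(s, slab_address(slab));
 *	void *p = start, *next;
 *	int i;
 *
 *	for (i = 0; i < slab->objects - 1; i++) {
 *		next = p + s->size;
 *		set_freepointer(s, p, next);
 *		p = next;
 *	}
 *	set_freepointer(s, p, NULL);	// terminate the chain
 *	slab->freelist = start;
 */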

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						      unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
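
/*
 * Worked example of the oo encoding (illustrative, assuming 4K pages):
 * a cache with order-1 slabs (8 KiB) and 128-byte objects yields
 * order_objects(1, 128) == 64, so oo_make(1, 128) packs
 * (1 << OO_SHIFT) + 64 into one word; oo_order() then recovers 1 and
 * oo_objects() recovers 64.
 */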

#ifdef CONFIG_SLUB_CPU_PARTIAL
static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
	unsigned int nr_slabs;

	s->cpu_partial = nr_objects;

	/*
	 * We take the number of objects but actually limit the number of
	 * slabs on the per cpu partial list, in order to limit excessive
	 * growth of the list. For simplicity we assume that the slabs will
	 * be half-full.
	 */
	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
	s->cpu_partial_slabs = nr_slabs;
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return s->cpu_partial_slabs;
}
#else
static inline void
slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct slab *slab)
{
	bit_spin_lock(PG_locked, &slab->__page_flags);
}

static __always_inline void slab_unlock(struct slab *slab)
{
	bit_spin_unlock(PG_locked, &slab->__page_flags);
}

static inline bool
__update_freelist_fast(struct slab *slab,
		       void *freelist_old, unsigned long counters_old,
		       void *freelist_new, unsigned long counters_new)
{
#ifdef system_has_freelist_aba
	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };

	return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
#else
	return false;
#endif
}

static inline bool
__update_freelist_slow(struct slab *slab,
		       void *freelist_old, unsigned long counters_old,
		       void *freelist_new, unsigned long counters_new)
{
	bool ret = false;

	slab_lock(slab);
	if (slab->freelist == freelist_old &&
	    slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		ret = true;
	}
	slab_unlock(slab);

	return ret;
}

/*
 * Interrupts must be disabled (for the fallback code to work right), typically
 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
 * part of bit_spin_lock(), is sufficient because the policy is not to allow any
 * allocation/free operation in hardirq context. Therefore nothing can
 * interrupt the operation.
 */
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
					  void *freelist_old, unsigned long counters_old,
					  void *freelist_new, unsigned long counters_new,
					  const char *n)
{
	bool ret;

	if (USE_LOCKLESS_FAST_PATH())
		lockdep_assert_irqs_disabled();

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	} else {
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
					void *freelist_old, unsigned long counters_old,
					void *freelist_new, unsigned long counters_new,
					const char *n)
{
	bool ret;

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
		local_irq_restore(flags);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
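
/*
 * Usage sketch (illustrative, condensed from the __slab_free() pattern
 * later in this file): callers snapshot freelist/counters, build the
 * new values, and retry until the combined update wins:
 *
 *	do {
 *		prior = slab->freelist;
 *		counters = slab->counters;
 *		set_freepointer(s, tail, prior);
 *		new.counters = counters;
 *		new.inuse -= cnt;
 *	} while (!slab_update_freelist(s, slab,
 *		prior, counters,
 *		head, new.counters,
 *		"__slab_free"));
 */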

/*
 * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc()
 * API family will round up the real request size to these fixed ones, so
 * there could be an extra area beyond what is requested. Save the original
 * request size in the metadata area, for better debug and sanity checks.
 */
static inline void set_orig_size(struct kmem_cache *s,
				 void *object, unsigned int orig_size)
{
	void *p = kasan_reset_tag(object);

	if (!slub_debug_orig_size(s))
		return;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	*(unsigned int *)p = orig_size;
}

static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
	void *p = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return kfence_ksize(object);

	if (!slub_debug_orig_size(s))
		return s->object_size;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	return *(unsigned int *)p;
}
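
/*
 * Layout sketch for the helpers above (illustrative): with
 * slub_debug_orig_size() enabled, the original request size sits after
 * the tracking data, e.g. for kmalloc(100, GFP_KERNEL) served from
 * kmalloc-128:
 *
 *	object + get_info_end(s)		end of object / free pointer
 *	       + 2 * sizeof(struct track)	alloc and free tracks
 *	       -> here: the unsigned int 100
 *
 * get_orig_size() later uses that value to bound redzone and poison
 * checks to the bytes the caller actually asked for.
 */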

#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
		       struct slab *slab)
{
	void *addr = slab_address(slab);
	void *p;

	bitmap_zero(obj_map, slab->objects);

	for (p = slab->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), obj_map);
}
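
/*
 * Usage sketch (illustrative): with the bitmap filled from the
 * freelist, a walker can tell allocated objects (bit clear) from free
 * ones (bit set), as the validation and debugfs code later in this
 * file does:
 *
 *	void *addr = slab_address(slab), *p;
 *
 *	__fill_map(object_map, s, slab);
 *	for_each_object(p, s, addr, slab->objects)
 *		if (!test_bit(__obj_to_index(s, addr, p), object_map))
 *			pr_info("allocated object at %p\n", p);
 *
 * object_map itself must only be used under object_map_lock.
 */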

#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}

bool slab_in_kunit_test(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
	kmsan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kmsan_enable_current();
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				      struct slab *slab, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = slab_address(slab);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + slab->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
			16, 1, kasan_reset_tag((void *)addr), length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

#ifdef CONFIG_STACKDEPOT
static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t handle;
	unsigned long entries[TRACK_ADDRS_COUNT];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return handle;
}
#else
static inline depot_stack_handle_t set_track_prepare(void)
{
	return 0;
}
#endif

static void set_track_update(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr,
			     depot_stack_handle_t handle)
{
	struct track *p = get_track(s, object, alloc);

#ifdef CONFIG_STACKDEPOT
	p->handle = handle;
#endif
	p->addr = addr;
	p->cpu = smp_processor_id();
	p->pid = current->pid;
	p->when = jiffies;
}

static __always_inline void set_track(struct kmem_cache *s, void *object,
				      enum track_item alloc, unsigned long addr)
{
	depot_stack_handle_t handle = set_track_prepare();

	set_track_update(s, object, alloc, addr, handle);
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	struct track *p;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	p = get_track(s, object, TRACK_ALLOC);
	memset(p, 0, 2*sizeof(struct track));
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	depot_stack_handle_t handle __maybe_unused;

	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKDEPOT
	handle = READ_ONCE(t->handle);
	if (handle)
		stack_depot_print(handle);
	else
		pr_err("object allocation/free stack trace missing\n");
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_slab_info(const struct slab *slab)
{
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
	       slab, slab->objects, slab->inuse, slab->freelist,
	       &slab->__page_flags);
}

void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
	set_orig_size(s, (void *)object, s->object_size);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");
	va_end(args);
}

__printf(2, 3)
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = slab_address(slab);

	print_tracking(s, p);

	print_slab_info(slab);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR,         "Object   ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
			      s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (slub_debug_orig_size(s))
		off += sizeof(unsigned int);

	off += kasan_metadata_size(s, false);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding  ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

static void object_err(struct kmem_cache *s, struct slab *slab,
		       u8 *object, char *reason)
{
	if (slab_add_kunit_errors())
		return;

	slab_bug(s, "%s", reason);
	print_trailer(s, slab, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, slab, nextfree) && freelist) {
		object_err(s, slab, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
				    const char *fmt, ...)
{
	va_list args;
	char buf[100];

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_slab_info(slab);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);
	unsigned int poison_size = s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		/*
		 * Here and below, avoid overwriting the KMSAN shadow. Keeping
		 * the shadow makes it possible to distinguish uninit-value
		 * from use-after-free.
		 */
		memset_no_sanitize_memory(p - s->red_left_pad, val,
					  s->red_left_pad);

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			/*
			 * Redzone the extra allocated space by kmalloc than
			 * requested, and the poison size will be limited to
			 * the original request size accordingly.
			 */
			poison_size = get_orig_size(s, object);
		}
	}

	if (s->flags & __OBJECT_POISON) {
		memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
		memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
	}

	if (s->flags & SLAB_RED_ZONE)
		memset_no_sanitize_memory(p + poison_size, val,
					  s->inuse - poison_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
			  void *from, void *to)
{
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
	memset(from, data, to - from);
}

#ifdef CONFIG_KMSAN
#define pad_check_attributes noinline __no_kmsan_checks
#else
#define pad_check_attributes
#endif

static pad_check_attributes int
check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
		       u8 *object, char *what,
		       u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = slab_address(slab);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	if (slab_add_kunit_errors())
		goto skip_bug_print;

	slab_bug(s, "%s overwritten", what);
	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
	       fault, end - 1, fault - addr,
	       fault[0], value);

skip_bug_print:
	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is at the middle of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
 * 	0xcc (SLUB_RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
 * 	D. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored, and therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
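
/*
 * Worked example of the layout above (illustrative only; assumes a
 * 64-bit kernel, redzoning plus user tracking, object_size = 24, and a
 * configuration where calculate_sizes() placed the free pointer outside
 * the object; exact offsets depend on calculate_sizes(), so treat this
 * strictly as a sketch). Offsets relative to the object address:
 *
 *	[ -8 .. -1]	left redzone		(0xbb / 0xcc)
 *	[  0 .. 23]	object bytes		(0x6b, last byte 0xa5, if poisoned)
 *	[ 24 .. 31]	right redzone		(0xbb / 0xcc)
 *	[ 32 .. 39]	free pointer		(s->offset >= s->inuse)
 *	[ 40 .. 87]	2 x struct track	(SLAB_STORE_USER)
 *	[ 88 ..   ]	padding up to s->size	(0x5a, POISON_INUSE)
 */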

static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER) {
		/* We also have user information there */
		off += 2 * sizeof(struct track);

		if (s->flags & SLAB_KMALLOC)
			off += sizeof(unsigned int);
	}

	off += kasan_metadata_size(s, false);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, slab, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static pad_check_attributes void
slab_pad_check(struct kmem_cache *s, struct slab *slab)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return;

	start = slab_address(slab);
	length = slab_size(slab);
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
			fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
}

static int check_object(struct kmem_cache *s, struct slab *slab,
			void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;
	unsigned int orig_size, kasan_meta_size;
	int ret = 1;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			ret = 0;

		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size))
			ret = 0;

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			orig_size = get_orig_size(s, object);

			if (s->object_size > orig_size &&
				!check_bytes_and_report(s, slab, object,
					"kmalloc Redzone", p + orig_size,
					val, s->object_size - orig_size)) {
				ret = 0;
			}
		}
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size))
				ret = 0;
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
			/*
			 * KASAN can save its free meta data inside of the
			 * object at offset 0. Thus, skip checking the part of
			 * the redzone that overlaps with the meta data.
			 */
			kasan_meta_size = kasan_metadata_size(s, true);
			if (kasan_meta_size < s->object_size - 1 &&
			    !check_bytes_and_report(s, slab, p, "Poison",
					p + kasan_meta_size, POISON_FREE,
					s->object_size - kasan_meta_size - 1))
				ret = 0;
			if (kasan_meta_size < s->object_size &&
			    !check_bytes_and_report(s, slab, p, "End Poison",
					p + s->object_size - 1, POISON_END, 1))
				ret = 0;
		}
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		if (!check_pad_bytes(s, slab, p))
			ret = 0;
	}

	/*
	 * Cannot check freepointer while object is allocated if
	 * object and freepointer overlap.
	 */
	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
		object_err(s, slab, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		ret = 0;
	}

	if (!ret && !slab_in_kunit_test()) {
		print_trailer(s, slab, object);
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	}

	return ret;
}

static int check_slab(struct kmem_cache *s, struct slab *slab)
{
	int maxobj;

	if (!folio_test_slab(slab_folio(slab))) {
		slab_err(s, slab, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(slab_order(slab), s->size);
	if (slab->objects > maxobj) {
		slab_err(s, slab, "objects %u > max %u",
			 slab->objects, maxobj);
		return 0;
	}
	if (slab->inuse > slab->objects) {
		slab_err(s, slab, "inuse %u > max %u",
			 slab->inuse, slab->objects);
		return 0;
	}
	if (slab->frozen) {
		slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
		return 0;
	}

	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, slab);
	return 1;
}

/*
 * Determine if a certain object in a slab is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = slab->freelist;
	while (fp && nr <= slab->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, slab, fp)) {
			if (object) {
				object_err(s, slab, object,
					   "Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, slab, "Freepointer corrupt");
				slab->freelist = NULL;
				slab->inuse = slab->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(slab_order(slab), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (slab->objects != max_objects) {
		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
			 slab->objects, max_objects);
		slab->objects = max_objects;
		slab_fix(s, "Number of objects adjusted");
	}
	if (slab->inuse != slab->objects - nr) {
		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
			 slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;
		slab_fix(s, "Object count adjusted");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct slab *slab, void *object,
		  int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, slab->inuse,
			slab->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&slab->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&slab->slab_list);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_inc(&n->nr_slabs);
	atomic_long_add(objects, &n->total_objects);
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
	metadata_access_disable();
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					   struct slab *slab, void *object)
{
	if (!check_slab(s, slab))
		return 0;

	if (!check_valid_pointer(s, slab, object)) {
		object_err(s, slab, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline bool alloc_debug_processing(struct kmem_cache *s,
			struct slab *slab, void *object, int orig_size)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, slab, object))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	trace(s, slab, object, 1);
	set_orig_size(s, object, orig_size);
	init_object(s, object, SLUB_RED_ACTIVE);
	return true;

bad:
	if (folio_test_slab(slab_folio(slab))) {
		/*
		 * If this is a slab page then let's do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		slab->inuse = slab->objects;
		slab->freelist = NULL;
		slab->frozen = 1; /* mark consistency-failed slab as frozen */
	}
	return false;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct slab *slab, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, slab, object)) {
		slab_err(s, slab, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, slab, object)) {
		object_err(s, slab, object, "Object already free");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != slab->slab_cache)) {
		if (!folio_test_slab(slab_folio(slab))) {
			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!slab->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, slab, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}

/*
 * Parse a block of slab_debug options. Blocks are delimited by ';'
 *
 * @str:    start of block
 * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs:  return start of list of slabs, or NULL when there's no list
 * @init:   assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			if (init)
				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}
skipped\n", *str); 1701 } 1702 } 1703 check_slabs: 1704 if (*str == ',') 1705 *slabs = ++str; 1706 else 1707 *slabs = NULL; 1708 1709 /* Skip over the slab list */ 1710 while (*str && *str != ';') 1711 str++; 1712 1713 /* Skip any completely empty blocks */ 1714 while (*str && *str == ';') 1715 str++; 1716 1717 if (init && higher_order_disable) 1718 disable_higher_order_debug = 1; 1719 1720 if (*str) 1721 return str; 1722 else 1723 return NULL; 1724 } 1725 1726 static int __init setup_slub_debug(char *str) 1727 { 1728 slab_flags_t flags; 1729 slab_flags_t global_flags; 1730 char *saved_str; 1731 char *slab_list; 1732 bool global_slub_debug_changed = false; 1733 bool slab_list_specified = false; 1734 1735 global_flags = DEBUG_DEFAULT_FLAGS; 1736 if (*str++ != '=' || !*str) 1737 /* 1738 * No options specified. Switch on full debugging. 1739 */ 1740 goto out; 1741 1742 saved_str = str; 1743 while (str) { 1744 str = parse_slub_debug_flags(str, &flags, &slab_list, true); 1745 1746 if (!slab_list) { 1747 global_flags = flags; 1748 global_slub_debug_changed = true; 1749 } else { 1750 slab_list_specified = true; 1751 if (flags & SLAB_STORE_USER) 1752 stack_depot_request_early_init(); 1753 } 1754 } 1755 1756 /* 1757 * For backwards compatibility, a single list of flags with list of 1758 * slabs means debugging is only changed for those slabs, so the global 1759 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending 1760 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as 1761 * long as there is no option specifying flags without a slab list. 1762 */ 1763 if (slab_list_specified) { 1764 if (!global_slub_debug_changed) 1765 global_flags = slub_debug; 1766 slub_debug_string = saved_str; 1767 } 1768 out: 1769 slub_debug = global_flags; 1770 if (slub_debug & SLAB_STORE_USER) 1771 stack_depot_request_early_init(); 1772 if (slub_debug != 0 || slub_debug_string) 1773 static_branch_enable(&slub_debug_enabled); 1774 else 1775 static_branch_disable(&slub_debug_enabled); 1776 if ((static_branch_unlikely(&init_on_alloc) || 1777 static_branch_unlikely(&init_on_free)) && 1778 (slub_debug & SLAB_POISON)) 1779 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); 1780 return 1; 1781 } 1782 1783 __setup("slab_debug", setup_slub_debug); 1784 __setup_param("slub_debug", slub_debug, setup_slub_debug, 0); 1785 1786 /* 1787 * kmem_cache_flags - apply debugging options to the cache 1788 * @flags: flags to set 1789 * @name: name of the cache 1790 * 1791 * Debug option(s) are applied to @flags. In addition to the debug 1792 * option(s), if a slab name (or multiple) is specified i.e. 1793 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ... 1794 * then only the select slabs will receive the debug option(s). 1795 */ 1796 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) 1797 { 1798 char *iter; 1799 size_t len; 1800 char *next_block; 1801 slab_flags_t block_flags; 1802 slab_flags_t slub_debug_local = slub_debug; 1803 1804 if (flags & SLAB_NO_USER_FLAGS) 1805 return flags; 1806 1807 /* 1808 * If the slab cache is for debugging (e.g. kmemleak) then 1809 * don't store user (stack trace) information by default, 1810 * but let the user enable it via the command line below. 

/*
 * kmem_cache_flags - apply debugging options to the cache
 * @flags:	flags to set
 * @name:	name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the select slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	if (flags & SLAB_NO_USER_FLAGS)
		return flags;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		slub_debug_local &= ~SLAB_STORE_USER;

	len = strlen(name);
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			end = strchrnul(iter, ',');
			if (next_block && next_block < end)
				end = next_block - 1;

			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				cmplen = max_t(size_t, len, (end - iter));

			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}

			if (!*end || *end == ';')
				break;
			iter = end + 1;
		}
	}

	return flags | slub_debug_local;
}
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
static inline
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}

static inline bool alloc_debug_processing(struct kmem_cache *s,
	struct slab *slab, void *object, int orig_size) { return true; }

static inline bool free_debug_processing(struct kmem_cache *s,
	struct slab *slab, void *head, void *tail, int *bulk_cnt,
	unsigned long addr, depot_stack_handle_t handle) { return true; }

static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
static inline int check_object(struct kmem_cache *s, struct slab *slab,
			void *object, u8 val) { return 1; }
static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
static inline void set_track(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr) {}
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct slab *slab) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct slab *slab) {}
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
#ifndef CONFIG_SLUB_TINY
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	return false;
}
#endif
#endif /* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_SLAB_OBJ_EXT

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
{
	struct slabobj_ext *slab_exts;
	struct slab *obj_exts_slab;

	obj_exts_slab = virt_to_slab(obj_exts);
	slab_exts = slab_obj_exts(obj_exts_slab);
	if (slab_exts) {
		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
						 obj_exts_slab, obj_exts);
		/* codetag should be NULL */
		WARN_ON(slab_exts[offs].ref.ct);
		set_codetag_empty(&slab_exts[offs].ref);
	}
}
slab->obj_exts = OBJEXTS_ALLOC_FAIL; 1918 } 1919 1920 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 1921 struct slabobj_ext *vec, unsigned int objects) 1922 { 1923 /* 1924 * If vector previously failed to allocate then we have live 1925 * objects with no tag reference. Mark all references in this 1926 * vector as empty to avoid warnings later on. 1927 */ 1928 if (obj_exts & OBJEXTS_ALLOC_FAIL) { 1929 unsigned int i; 1930 1931 for (i = 0; i < objects; i++) 1932 set_codetag_empty(&vec[i].ref); 1933 } 1934 } 1935 1936 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 1937 1938 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} 1939 static inline void mark_failed_objexts_alloc(struct slab *slab) {} 1940 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 1941 struct slabobj_ext *vec, unsigned int objects) {} 1942 1943 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 1944 1945 /* 1946 * The allocated objcg pointers array is not accounted directly. 1947 * Moreover, it should not come from DMA buffer and is not readily 1948 * reclaimable. So those GFP bits should be masked off. 1949 */ 1950 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ 1951 __GFP_ACCOUNT | __GFP_NOFAIL) 1952 1953 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 1954 gfp_t gfp, bool new_slab) 1955 { 1956 unsigned int objects = objs_per_slab(s, slab); 1957 unsigned long new_exts; 1958 unsigned long old_exts; 1959 struct slabobj_ext *vec; 1960 1961 gfp &= ~OBJCGS_CLEAR_MASK; 1962 /* Prevent recursive extension vector allocation */ 1963 gfp |= __GFP_NO_OBJ_EXT; 1964 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, 1965 slab_nid(slab)); 1966 if (!vec) { 1967 /* Mark vectors which failed to allocate */ 1968 if (new_slab) 1969 mark_failed_objexts_alloc(slab); 1970 1971 return -ENOMEM; 1972 } 1973 1974 new_exts = (unsigned long)vec; 1975 #ifdef CONFIG_MEMCG 1976 new_exts |= MEMCG_DATA_OBJEXTS; 1977 #endif 1978 old_exts = READ_ONCE(slab->obj_exts); 1979 handle_failed_objexts_alloc(old_exts, vec, objects); 1980 if (new_slab) { 1981 /* 1982 * If the slab is brand new and nobody can yet access its 1983 * obj_exts, no synchronization is required and obj_exts can 1984 * be simply assigned. 1985 */ 1986 slab->obj_exts = new_exts; 1987 } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) || 1988 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { 1989 /* 1990 * If the slab is already in use, somebody can allocate and 1991 * assign slabobj_exts in parallel. In this case the existing 1992 * objcg vector should be reused. 1993 */ 1994 mark_objexts_empty(vec); 1995 kfree(vec); 1996 return 0; 1997 } 1998 1999 kmemleak_not_leak(vec); 2000 return 0; 2001 } 2002 2003 static inline void free_slab_obj_exts(struct slab *slab) 2004 { 2005 struct slabobj_ext *obj_exts; 2006 2007 obj_exts = slab_obj_exts(slab); 2008 if (!obj_exts) 2009 return; 2010 2011 /* 2012 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its 2013 * corresponding extension will be NULL. alloc_tag_sub() will throw a 2014 * warning if slab has extensions but the extension of an object is 2015 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that 2016 * the extension for obj_exts is expected to be NULL. 
2017 */ 2018 mark_objexts_empty(obj_exts); 2019 kfree(obj_exts); 2020 slab->obj_exts = 0; 2021 } 2022 2023 static inline bool need_slab_obj_ext(void) 2024 { 2025 if (mem_alloc_profiling_enabled()) 2026 return true; 2027 2028 /* 2029 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally 2030 * inside memcg_slab_post_alloc_hook. No other users for now. 2031 */ 2032 return false; 2033 } 2034 2035 #else /* CONFIG_SLAB_OBJ_EXT */ 2036 2037 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 2038 gfp_t gfp, bool new_slab) 2039 { 2040 return 0; 2041 } 2042 2043 static inline void free_slab_obj_exts(struct slab *slab) 2044 { 2045 } 2046 2047 static inline bool need_slab_obj_ext(void) 2048 { 2049 return false; 2050 } 2051 2052 #endif /* CONFIG_SLAB_OBJ_EXT */ 2053 2054 #ifdef CONFIG_MEM_ALLOC_PROFILING 2055 2056 static inline struct slabobj_ext * 2057 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 2058 { 2059 struct slab *slab; 2060 2061 if (!p) 2062 return NULL; 2063 2064 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2065 return NULL; 2066 2067 if (flags & __GFP_NO_OBJ_EXT) 2068 return NULL; 2069 2070 slab = virt_to_slab(p); 2071 if (!slab_obj_exts(slab) && 2072 WARN(alloc_slab_obj_exts(slab, s, flags, false), 2073 "%s, %s: Failed to create slab extension vector!\n", 2074 __func__, s->name)) 2075 return NULL; 2076 2077 return slab_obj_exts(slab) + obj_to_index(s, slab, p); 2078 } 2079 2080 static inline void 2081 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2082 { 2083 if (need_slab_obj_ext()) { 2084 struct slabobj_ext *obj_exts; 2085 2086 obj_exts = prepare_slab_obj_exts_hook(s, flags, object); 2087 /* 2088 * Currently obj_exts is used only for allocation profiling. 2089 * If other users appear then mem_alloc_profiling_enabled() 2090 * check should be added before alloc_tag_add(). 2091 */ 2092 if (likely(obj_exts)) 2093 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); 2094 } 2095 } 2096 2097 static inline void 2098 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2099 int objects) 2100 { 2101 struct slabobj_ext *obj_exts; 2102 int i; 2103 2104 if (!mem_alloc_profiling_enabled()) 2105 return; 2106 2107 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. 
*/ 2108 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2109 return; 2110 2111 obj_exts = slab_obj_exts(slab); 2112 if (!obj_exts) 2113 return; 2114 2115 for (i = 0; i < objects; i++) { 2116 unsigned int off = obj_to_index(s, slab, p[i]); 2117 2118 alloc_tag_sub(&obj_exts[off].ref, s->size); 2119 } 2120 } 2121 2122 #else /* CONFIG_MEM_ALLOC_PROFILING */ 2123 2124 static inline void 2125 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2126 { 2127 } 2128 2129 static inline void 2130 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2131 int objects) 2132 { 2133 } 2134 2135 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 2136 2137 2138 #ifdef CONFIG_MEMCG 2139 2140 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object); 2141 2142 static __fastpath_inline 2143 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 2144 gfp_t flags, size_t size, void **p) 2145 { 2146 if (likely(!memcg_kmem_online())) 2147 return true; 2148 2149 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) 2150 return true; 2151 2152 if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p))) 2153 return true; 2154 2155 if (likely(size == 1)) { 2156 memcg_alloc_abort_single(s, *p); 2157 *p = NULL; 2158 } else { 2159 kmem_cache_free_bulk(s, size, p); 2160 } 2161 2162 return false; 2163 } 2164 2165 static __fastpath_inline 2166 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2167 int objects) 2168 { 2169 struct slabobj_ext *obj_exts; 2170 2171 if (!memcg_kmem_online()) 2172 return; 2173 2174 obj_exts = slab_obj_exts(slab); 2175 if (likely(!obj_exts)) 2176 return; 2177 2178 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); 2179 } 2180 2181 static __fastpath_inline 2182 bool memcg_slab_post_charge(void *p, gfp_t flags) 2183 { 2184 struct slabobj_ext *slab_exts; 2185 struct kmem_cache *s; 2186 struct folio *folio; 2187 struct slab *slab; 2188 unsigned long off; 2189 2190 folio = virt_to_folio(p); 2191 if (!folio_test_slab(folio)) { 2192 int size; 2193 2194 if (folio_memcg_kmem(folio)) 2195 return true; 2196 2197 if (__memcg_kmem_charge_page(folio_page(folio, 0), flags, 2198 folio_order(folio))) 2199 return false; 2200 2201 /* 2202 * This folio has already been accounted in the global stats but 2203 * not in the memcg stats. So, subtract from the global and use 2204 * the interface which adds to both global and memcg stats. 2205 */ 2206 size = folio_size(folio); 2207 node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size); 2208 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size); 2209 return true; 2210 } 2211 2212 slab = folio_slab(folio); 2213 s = slab->slab_cache; 2214 2215 /* 2216 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency 2217 * of slab_obj_exts being allocated from the same slab and thus the slab 2218 * becoming effectively unfreeable. 2219 */ 2220 if (is_kmalloc_normal(s)) 2221 return true; 2222 2223 /* Ignore already charged objects. 
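* (An objcg pointer already stored in this object's slot of the obj_exts vector means the object was charged when it was allocated.)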
*/ 2224 slab_exts = slab_obj_exts(slab); 2225 if (slab_exts) { 2226 off = obj_to_index(s, slab, p); 2227 if (unlikely(slab_exts[off].objcg)) 2228 return true; 2229 } 2230 2231 return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p); 2232 } 2233 2234 #else /* CONFIG_MEMCG */ 2235 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s, 2236 struct list_lru *lru, 2237 gfp_t flags, size_t size, 2238 void **p) 2239 { 2240 return true; 2241 } 2242 2243 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 2244 void **p, int objects) 2245 { 2246 } 2247 2248 static inline bool memcg_slab_post_charge(void *p, gfp_t flags) 2249 { 2250 return true; 2251 } 2252 #endif /* CONFIG_MEMCG */ 2253 2254 #ifdef CONFIG_SLUB_RCU_DEBUG 2255 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head); 2256 2257 struct rcu_delayed_free { 2258 struct rcu_head head; 2259 void *object; 2260 }; 2261 #endif 2262 2263 /* 2264 * Hooks for other subsystems that check memory allocations. In a typical 2265 * production configuration these hooks should all produce no code at all. 2266 * 2267 * Returns true if freeing of the object can proceed, false if its reuse 2268 * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned 2269 * to KFENCE. 2270 */ 2271 static __always_inline 2272 bool slab_free_hook(struct kmem_cache *s, void *x, bool init, 2273 bool after_rcu_delay) 2274 { 2275 /* Are the object contents still accessible? */ 2276 bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay; 2277 2278 kmemleak_free_recursive(x, s->flags); 2279 kmsan_slab_free(s, x); 2280 2281 debug_check_no_locks_freed(x, s->object_size); 2282 2283 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 2284 debug_check_no_obj_freed(x, s->object_size); 2285 2286 /* Use KCSAN to help debug racy use-after-free. */ 2287 if (!still_accessible) 2288 __kcsan_check_access(x, s->object_size, 2289 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 2290 2291 if (kfence_free(x)) 2292 return false; 2293 2294 /* 2295 * Give KASAN a chance to notice an invalid free operation before we 2296 * modify the object. 2297 */ 2298 if (kasan_slab_pre_free(s, x)) 2299 return false; 2300 2301 #ifdef CONFIG_SLUB_RCU_DEBUG 2302 if (still_accessible) { 2303 struct rcu_delayed_free *delayed_free; 2304 2305 delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT); 2306 if (delayed_free) { 2307 /* 2308 * Let KASAN track our call stack as a "related work 2309 * creation", just like if the object had been freed 2310 * normally via kfree_rcu(). 2311 * We have to do this manually because the rcu_head is 2312 * not located inside the object. 2313 */ 2314 kasan_record_aux_stack_noalloc(x); 2315 2316 delayed_free->object = x; 2317 call_rcu(&delayed_free->head, slab_free_after_rcu_debug); 2318 return false; 2319 } 2320 } 2321 #endif /* CONFIG_SLUB_RCU_DEBUG */ 2322 2323 /* 2324 * As memory initialization might be integrated into KASAN, 2325 * kasan_slab_free and the initialization memsets must be 2326 * kept together to avoid discrepancies in behavior. 2327 * 2328 * The initialization memsets clear the object and the metadata, 2329 * but don't touch the SLAB redzone. 2330 * 2331 * The object's freepointer is also left untouched if it is stored 2332 * outside the object.
2333 */ 2334 if (unlikely(init)) { 2335 int rsize; 2336 unsigned int inuse, orig_size; 2337 2338 inuse = get_info_end(s); 2339 orig_size = get_orig_size(s, x); 2340 if (!kasan_has_integrated_init()) 2341 memset(kasan_reset_tag(x), 0, orig_size); 2342 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 2343 memset((char *)kasan_reset_tag(x) + inuse, 0, 2344 s->size - inuse - rsize); 2345 /* 2346 * Restore orig_size, otherwise the overwritten kmalloc redzone 2347 * would be reported 2348 */ 2349 set_orig_size(s, x, orig_size); 2350 2351 } 2352 /* KASAN might put x into memory quarantine, delaying its reuse. */ 2353 return !kasan_slab_free(s, x, init, still_accessible); 2354 } 2355 2356 static __fastpath_inline 2357 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail, 2358 int *cnt) 2359 { 2360 2361 void *object; 2362 void *next = *head; 2363 void *old_tail = *tail; 2364 bool init; 2365 2366 if (is_kfence_address(next)) { 2367 slab_free_hook(s, next, false, false); 2368 return false; 2369 } 2370 2371 /* Head and tail of the reconstructed freelist */ 2372 *head = NULL; 2373 *tail = NULL; 2374 2375 init = slab_want_init_on_free(s); 2376 2377 do { 2378 object = next; 2379 next = get_freepointer(s, object); 2380 2381 /* If the object's reuse doesn't have to be delayed */ 2382 if (likely(slab_free_hook(s, object, init, false))) { 2383 /* Move object to the new freelist */ 2384 set_freepointer(s, object, *head); 2385 *head = object; 2386 if (!*tail) 2387 *tail = object; 2388 } else { 2389 /* 2390 * Adjust the reconstructed freelist depth 2391 * accordingly if the object's reuse is delayed. 2392 */ 2393 --(*cnt); 2394 } 2395 } while (object != old_tail); 2396 2397 return *head != NULL; 2398 } 2399 2400 static void *setup_object(struct kmem_cache *s, void *object) 2401 { 2402 setup_object_debug(s, object); 2403 object = kasan_init_slab_obj(s, object); 2404 if (unlikely(s->ctor)) { 2405 kasan_unpoison_new_object(s, object); 2406 s->ctor(object); 2407 kasan_poison_new_object(s, object); 2408 } 2409 return object; 2410 } 2411 2412 /* 2413 * Slab allocation and freeing 2414 */ 2415 static inline struct slab *alloc_slab_page(gfp_t flags, int node, 2416 struct kmem_cache_order_objects oo) 2417 { 2418 struct folio *folio; 2419 struct slab *slab; 2420 unsigned int order = oo_order(oo); 2421 2422 if (node == NUMA_NO_NODE) 2423 folio = (struct folio *)alloc_pages(flags, order); 2424 else 2425 folio = (struct folio *)__alloc_pages_node(node, flags, order); 2426 2427 if (!folio) 2428 return NULL; 2429 2430 slab = folio_slab(folio); 2431 __folio_set_slab(folio); 2432 /* Make the flag visible before any changes to folio->mapping */ 2433 smp_wmb(); 2434 if (folio_is_pfmemalloc(folio)) 2435 slab_set_pfmemalloc(slab); 2436 2437 return slab; 2438 } 2439 2440 #ifdef CONFIG_SLAB_FREELIST_RANDOM 2441 /* Pre-initialize the random sequence cache */ 2442 static int init_cache_random_seq(struct kmem_cache *s) 2443 { 2444 unsigned int count = oo_objects(s->oo); 2445 int err; 2446 2447 /* Bail out if already initialised */ 2448 if (s->random_seq) 2449 return 0; 2450 2451 err = cache_random_seq_create(s, count, GFP_KERNEL); 2452 if (err) { 2453 pr_err("SLUB: Unable to initialize free list for %s\n", 2454 s->name); 2455 return err; 2456 } 2457 2458 /* Transform to an offset on the set of pages */ 2459 if (s->random_seq) { 2460 unsigned int i; 2461 2462 for (i = 0; i < count; i++) 2463 s->random_seq[i] *= s->size; 2464 } 2465 return 0; 2466 } 2467 2468 /* Initialize each random sequence freelist per cache */ 2469
static void __init init_freelist_randomization(void) 2470 { 2471 struct kmem_cache *s; 2472 2473 mutex_lock(&slab_mutex); 2474 2475 list_for_each_entry(s, &slab_caches, list) 2476 init_cache_random_seq(s); 2477 2478 mutex_unlock(&slab_mutex); 2479 } 2480 2481 /* Get the next entry from the pre-computed randomized freelist */ 2482 static void *next_freelist_entry(struct kmem_cache *s, 2483 unsigned long *pos, void *start, 2484 unsigned long page_limit, 2485 unsigned long freelist_count) 2486 { 2487 unsigned int idx; 2488 2489 /* 2490 * If the target page allocation failed, the number of objects on the 2491 * page might be smaller than the usual count defined by the cache. 2492 */ 2493 do { 2494 idx = s->random_seq[*pos]; 2495 *pos += 1; 2496 if (*pos >= freelist_count) 2497 *pos = 0; 2498 } while (unlikely(idx >= page_limit)); 2499 2500 return (char *)start + idx; 2501 } 2502 2503 /* Shuffle the singly linked freelist based on a random pre-computed sequence */ 2504 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2505 { 2506 void *start; 2507 void *cur; 2508 void *next; 2509 unsigned long idx, pos, page_limit, freelist_count; 2510 2511 if (slab->objects < 2 || !s->random_seq) 2512 return false; 2513 2514 freelist_count = oo_objects(s->oo); 2515 pos = get_random_u32_below(freelist_count); 2516 2517 page_limit = slab->objects * s->size; 2518 start = fixup_red_left(s, slab_address(slab)); 2519 2520 /* First entry is used as the base of the freelist */ 2521 cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count); 2522 cur = setup_object(s, cur); 2523 slab->freelist = cur; 2524 2525 for (idx = 1; idx < slab->objects; idx++) { 2526 next = next_freelist_entry(s, &pos, start, page_limit, 2527 freelist_count); 2528 next = setup_object(s, next); 2529 set_freepointer(s, cur, next); 2530 cur = next; 2531 } 2532 set_freepointer(s, cur, NULL); 2533 2534 return true; 2535 } 2536 #else 2537 static inline int init_cache_random_seq(struct kmem_cache *s) 2538 { 2539 return 0; 2540 } 2541 static inline void init_freelist_randomization(void) { } 2542 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2543 { 2544 return false; 2545 } 2546 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 2547 2548 static __always_inline void account_slab(struct slab *slab, int order, 2549 struct kmem_cache *s, gfp_t gfp) 2550 { 2551 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) 2552 alloc_slab_obj_exts(slab, s, gfp, true); 2553 2554 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2555 PAGE_SIZE << order); 2556 } 2557 2558 static __always_inline void unaccount_slab(struct slab *slab, int order, 2559 struct kmem_cache *s) 2560 { 2561 if (memcg_kmem_online() || need_slab_obj_ext()) 2562 free_slab_obj_exts(slab); 2563 2564 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2565 -(PAGE_SIZE << order)); 2566 } 2567 2568 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 2569 { 2570 struct slab *slab; 2571 struct kmem_cache_order_objects oo = s->oo; 2572 gfp_t alloc_gfp; 2573 void *start, *p, *next; 2574 int idx; 2575 bool shuffle; 2576 2577 flags &= gfp_allowed_mask; 2578 2579 flags |= s->allocflags; 2580 2581 /* 2582 * Let the initial higher-order allocation fail under memory pressure 2583 * so we fall back to the minimum order allocation.
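* E.g. if s->oo is an order-3 allocation but memory is fragmented, the first attempt below may fail and we retry with s->min (usually order 0; illustrative orders).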
2584 */ 2585 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 2586 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 2587 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; 2588 2589 slab = alloc_slab_page(alloc_gfp, node, oo); 2590 if (unlikely(!slab)) { 2591 oo = s->min; 2592 alloc_gfp = flags; 2593 /* 2594 * Allocation may have failed due to fragmentation. 2595 * Try a lower order alloc if possible 2596 */ 2597 slab = alloc_slab_page(alloc_gfp, node, oo); 2598 if (unlikely(!slab)) 2599 return NULL; 2600 stat(s, ORDER_FALLBACK); 2601 } 2602 2603 slab->objects = oo_objects(oo); 2604 slab->inuse = 0; 2605 slab->frozen = 0; 2606 2607 account_slab(slab, oo_order(oo), s, flags); 2608 2609 slab->slab_cache = s; 2610 2611 kasan_poison_slab(slab); 2612 2613 start = slab_address(slab); 2614 2615 setup_slab_debug(s, slab, start); 2616 2617 shuffle = shuffle_freelist(s, slab); 2618 2619 if (!shuffle) { 2620 start = fixup_red_left(s, start); 2621 start = setup_object(s, start); 2622 slab->freelist = start; 2623 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 2624 next = p + s->size; 2625 next = setup_object(s, next); 2626 set_freepointer(s, p, next); 2627 p = next; 2628 } 2629 set_freepointer(s, p, NULL); 2630 } 2631 2632 return slab; 2633 } 2634 2635 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 2636 { 2637 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2638 flags = kmalloc_fix_flags(flags); 2639 2640 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2641 2642 return allocate_slab(s, 2643 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 2644 } 2645 2646 static void __free_slab(struct kmem_cache *s, struct slab *slab) 2647 { 2648 struct folio *folio = slab_folio(slab); 2649 int order = folio_order(folio); 2650 int pages = 1 << order; 2651 2652 __slab_clear_pfmemalloc(slab); 2653 folio->mapping = NULL; 2654 /* Make the mapping reset visible before clearing the flag */ 2655 smp_wmb(); 2656 __folio_clear_slab(folio); 2657 mm_account_reclaimed_pages(pages); 2658 unaccount_slab(slab, order, s); 2659 __free_pages(&folio->page, order); 2660 } 2661 2662 static void rcu_free_slab(struct rcu_head *h) 2663 { 2664 struct slab *slab = container_of(h, struct slab, rcu_head); 2665 2666 __free_slab(slab->slab_cache, slab); 2667 } 2668 2669 static void free_slab(struct kmem_cache *s, struct slab *slab) 2670 { 2671 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 2672 void *p; 2673 2674 slab_pad_check(s, slab); 2675 for_each_object(p, s, slab_address(slab), slab->objects) 2676 check_object(s, slab, p, SLUB_RED_INACTIVE); 2677 } 2678 2679 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) 2680 call_rcu(&slab->rcu_head, rcu_free_slab); 2681 else 2682 __free_slab(s, slab); 2683 } 2684 2685 static void discard_slab(struct kmem_cache *s, struct slab *slab) 2686 { 2687 dec_slabs_node(s, slab_nid(slab), slab->objects); 2688 free_slab(s, slab); 2689 } 2690 2691 /* 2692 * SLUB reuses PG_workingset bit to keep track of whether it's on 2693 * the per-node partial list. 
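* The bit is set when a slab is added to the node partial list and cleared when it is removed; see __add_partial() and remove_partial() below.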
2694 */ 2695 static inline bool slab_test_node_partial(const struct slab *slab) 2696 { 2697 return folio_test_workingset(slab_folio(slab)); 2698 } 2699 2700 static inline void slab_set_node_partial(struct slab *slab) 2701 { 2702 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2703 } 2704 2705 static inline void slab_clear_node_partial(struct slab *slab) 2706 { 2707 clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2708 } 2709 2710 /* 2711 * Management of partially allocated slabs. 2712 */ 2713 static inline void 2714 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 2715 { 2716 n->nr_partial++; 2717 if (tail == DEACTIVATE_TO_TAIL) 2718 list_add_tail(&slab->slab_list, &n->partial); 2719 else 2720 list_add(&slab->slab_list, &n->partial); 2721 slab_set_node_partial(slab); 2722 } 2723 2724 static inline void add_partial(struct kmem_cache_node *n, 2725 struct slab *slab, int tail) 2726 { 2727 lockdep_assert_held(&n->list_lock); 2728 __add_partial(n, slab, tail); 2729 } 2730 2731 static inline void remove_partial(struct kmem_cache_node *n, 2732 struct slab *slab) 2733 { 2734 lockdep_assert_held(&n->list_lock); 2735 list_del(&slab->slab_list); 2736 slab_clear_node_partial(slab); 2737 n->nr_partial--; 2738 } 2739 2740 /* 2741 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a 2742 * slab from the n->partial list. Remove only a single object from the slab, do 2743 * the alloc_debug_processing() checks and leave the slab on the list, or move 2744 * it to full list if it was the last free object. 2745 */ 2746 static void *alloc_single_from_partial(struct kmem_cache *s, 2747 struct kmem_cache_node *n, struct slab *slab, int orig_size) 2748 { 2749 void *object; 2750 2751 lockdep_assert_held(&n->list_lock); 2752 2753 object = slab->freelist; 2754 slab->freelist = get_freepointer(s, object); 2755 slab->inuse++; 2756 2757 if (!alloc_debug_processing(s, slab, object, orig_size)) { 2758 if (folio_test_slab(slab_folio(slab))) 2759 remove_partial(n, slab); 2760 return NULL; 2761 } 2762 2763 if (slab->inuse == slab->objects) { 2764 remove_partial(n, slab); 2765 add_full(s, n, slab); 2766 } 2767 2768 return object; 2769 } 2770 2771 /* 2772 * Called only for kmem_cache_debug() caches to allocate from a freshly 2773 * allocated slab. Allocate a single object instead of whole freelist 2774 * and put the slab to the partial (or full) list. 2775 */ 2776 static void *alloc_single_from_new_slab(struct kmem_cache *s, 2777 struct slab *slab, int orig_size) 2778 { 2779 int nid = slab_nid(slab); 2780 struct kmem_cache_node *n = get_node(s, nid); 2781 unsigned long flags; 2782 void *object; 2783 2784 2785 object = slab->freelist; 2786 slab->freelist = get_freepointer(s, object); 2787 slab->inuse = 1; 2788 2789 if (!alloc_debug_processing(s, slab, object, orig_size)) 2790 /* 2791 * It's not really expected that this would fail on a 2792 * freshly allocated slab, but a concurrent memory 2793 * corruption in theory could cause that. 
2794 */ 2795 return NULL; 2796 2797 spin_lock_irqsave(&n->list_lock, flags); 2798 2799 if (slab->inuse == slab->objects) 2800 add_full(s, n, slab); 2801 else 2802 add_partial(n, slab, DEACTIVATE_TO_HEAD); 2803 2804 inc_slabs_node(s, nid, slab->objects); 2805 spin_unlock_irqrestore(&n->list_lock, flags); 2806 2807 return object; 2808 } 2809 2810 #ifdef CONFIG_SLUB_CPU_PARTIAL 2811 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 2812 #else 2813 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 2814 int drain) { } 2815 #endif 2816 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 2817 2818 /* 2819 * Try to allocate a partial slab from a specific node. 2820 */ 2821 static struct slab *get_partial_node(struct kmem_cache *s, 2822 struct kmem_cache_node *n, 2823 struct partial_context *pc) 2824 { 2825 struct slab *slab, *slab2, *partial = NULL; 2826 unsigned long flags; 2827 unsigned int partial_slabs = 0; 2828 2829 /* 2830 * Racy check. If we mistakenly see no partial slabs then we 2831 * just allocate an empty slab. If we mistakenly try to get a 2832 * partial slab and there is none available then get_partial() 2833 * will return NULL. 2834 */ 2835 if (!n || !n->nr_partial) 2836 return NULL; 2837 2838 spin_lock_irqsave(&n->list_lock, flags); 2839 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 2840 if (!pfmemalloc_match(slab, pc->flags)) 2841 continue; 2842 2843 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 2844 void *object = alloc_single_from_partial(s, n, slab, 2845 pc->orig_size); 2846 if (object) { 2847 partial = slab; 2848 pc->object = object; 2849 break; 2850 } 2851 continue; 2852 } 2853 2854 remove_partial(n, slab); 2855 2856 if (!partial) { 2857 partial = slab; 2858 stat(s, ALLOC_FROM_PARTIAL); 2859 2860 if ((slub_get_cpu_partial(s) == 0)) { 2861 break; 2862 } 2863 } else { 2864 put_cpu_partial(s, slab, 0); 2865 stat(s, CPU_PARTIAL_NODE); 2866 2867 if (++partial_slabs > slub_get_cpu_partial(s) / 2) { 2868 break; 2869 } 2870 } 2871 } 2872 spin_unlock_irqrestore(&n->list_lock, flags); 2873 return partial; 2874 } 2875 2876 /* 2877 * Get a slab from somewhere. Search in increasing NUMA distances. 2878 */ 2879 static struct slab *get_any_partial(struct kmem_cache *s, 2880 struct partial_context *pc) 2881 { 2882 #ifdef CONFIG_NUMA 2883 struct zonelist *zonelist; 2884 struct zoneref *z; 2885 struct zone *zone; 2886 enum zone_type highest_zoneidx = gfp_zone(pc->flags); 2887 struct slab *slab; 2888 unsigned int cpuset_mems_cookie; 2889 2890 /* 2891 * The defrag ratio allows a configuration of the tradeoffs between 2892 * inter node defragmentation and node local allocations. A lower 2893 * defrag_ratio increases the tendency to do local allocations 2894 * instead of attempting to obtain partial slabs from other nodes. 2895 * 2896 * If the defrag_ratio is set to 0 then kmalloc() always 2897 * returns node local objects. If the ratio is higher then kmalloc() 2898 * may return off node objects because partial slabs are obtained 2899 * from other nodes and filled up. 2900 * 2901 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2902 * (which makes defrag_ratio = 1000) then every (well almost) 2903 * allocation will first attempt to defrag slab caches on other nodes. 2904 * This means scanning over all nodes to look for partial slabs which 2905 * may be expensive if we do it every time we are trying to find a slab 2906 * with available objects. 
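* As an illustrative example: writing 50 to that sysfs file makes remote_node_defrag_ratio 500, so the check below bails out to a node-local allocation whenever get_cycles() % 1024 exceeds 500, i.e. remote partial lists are searched on roughly half of the attempts.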
2907 */ 2908 if (!s->remote_node_defrag_ratio || 2909 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2910 return NULL; 2911 2912 do { 2913 cpuset_mems_cookie = read_mems_allowed_begin(); 2914 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); 2915 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2916 struct kmem_cache_node *n; 2917 2918 n = get_node(s, zone_to_nid(zone)); 2919 2920 if (n && cpuset_zone_allowed(zone, pc->flags) && 2921 n->nr_partial > s->min_partial) { 2922 slab = get_partial_node(s, n, pc); 2923 if (slab) { 2924 /* 2925 * Don't check read_mems_allowed_retry() 2926 * here - if mems_allowed was updated in 2927 * parallel, that was a harmless race 2928 * between allocation and the cpuset 2929 * update 2930 */ 2931 return slab; 2932 } 2933 } 2934 } 2935 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2936 #endif /* CONFIG_NUMA */ 2937 return NULL; 2938 } 2939 2940 /* 2941 * Get a partial slab, lock it and return it. 2942 */ 2943 static struct slab *get_partial(struct kmem_cache *s, int node, 2944 struct partial_context *pc) 2945 { 2946 struct slab *slab; 2947 int searchnode = node; 2948 2949 if (node == NUMA_NO_NODE) 2950 searchnode = numa_mem_id(); 2951 2952 slab = get_partial_node(s, get_node(s, searchnode), pc); 2953 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE))) 2954 return slab; 2955 2956 return get_any_partial(s, pc); 2957 } 2958 2959 #ifndef CONFIG_SLUB_TINY 2960 2961 #ifdef CONFIG_PREEMPTION 2962 /* 2963 * Calculate the next globally unique transaction for disambiguation 2964 * during cmpxchg. The transactions start with the cpu number and are then 2965 * incremented by CONFIG_NR_CPUS. 2966 */ 2967 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2968 #else 2969 /* 2970 * No preemption supported therefore also no need to check for 2971 * different cpus. 2972 */ 2973 #define TID_STEP 1 2974 #endif /* CONFIG_PREEMPTION */ 2975 2976 static inline unsigned long next_tid(unsigned long tid) 2977 { 2978 return tid + TID_STEP; 2979 } 2980 2981 #ifdef SLUB_DEBUG_CMPXCHG 2982 static inline unsigned int tid_to_cpu(unsigned long tid) 2983 { 2984 return tid % TID_STEP; 2985 } 2986 2987 static inline unsigned long tid_to_event(unsigned long tid) 2988 { 2989 return tid / TID_STEP; 2990 } 2991 #endif 2992 2993 static inline unsigned int init_tid(int cpu) 2994 { 2995 return cpu; 2996 } 2997 2998 static inline void note_cmpxchg_failure(const char *n, 2999 const struct kmem_cache *s, unsigned long tid) 3000 { 3001 #ifdef SLUB_DEBUG_CMPXCHG 3002 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 3003 3004 pr_info("%s %s: cmpxchg redo ", n, s->name); 3005 3006 #ifdef CONFIG_PREEMPTION 3007 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 3008 pr_warn("due to cpu change %d -> %d\n", 3009 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 3010 else 3011 #endif 3012 if (tid_to_event(tid) != tid_to_event(actual_tid)) 3013 pr_warn("due to cpu running other code. Event %ld->%ld\n", 3014 tid_to_event(tid), tid_to_event(actual_tid)); 3015 else 3016 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 3017 actual_tid, tid, next_tid(tid)); 3018 #endif 3019 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 3020 } 3021 3022 static void init_kmem_cache_cpus(struct kmem_cache *s) 3023 { 3024 int cpu; 3025 struct kmem_cache_cpu *c; 3026 3027 for_each_possible_cpu(cpu) { 3028 c = per_cpu_ptr(s->cpu_slab, cpu); 3029 local_lock_init(&c->lock); 3030 c->tid = init_tid(cpu); 3031 } 3032 } 3033 3034 /* 3035 * Finishes removing the cpu slab. 
Merges cpu's freelist with slab's freelist, 3036 * unfreezes the slab and puts it on the proper list. 3037 * Assumes the slab has already been safely taken away from kmem_cache_cpu 3038 * by the caller. 3039 */ 3040 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, 3041 void *freelist) 3042 { 3043 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 3044 int free_delta = 0; 3045 void *nextfree, *freelist_iter, *freelist_tail; 3046 int tail = DEACTIVATE_TO_HEAD; 3047 unsigned long flags = 0; 3048 struct slab new; 3049 struct slab old; 3050 3051 if (READ_ONCE(slab->freelist)) { 3052 stat(s, DEACTIVATE_REMOTE_FREES); 3053 tail = DEACTIVATE_TO_TAIL; 3054 } 3055 3056 /* 3057 * Stage one: Count the objects on cpu's freelist as free_delta and 3058 * remember the last object in freelist_tail for later splicing. 3059 */ 3060 freelist_tail = NULL; 3061 freelist_iter = freelist; 3062 while (freelist_iter) { 3063 nextfree = get_freepointer(s, freelist_iter); 3064 3065 /* 3066 * If 'nextfree' is invalid, it is possible that the object at 3067 * 'freelist_iter' is already corrupted. So isolate all objects 3068 * starting at 'freelist_iter' by skipping them. 3069 */ 3070 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) 3071 break; 3072 3073 freelist_tail = freelist_iter; 3074 free_delta++; 3075 3076 freelist_iter = nextfree; 3077 } 3078 3079 /* 3080 * Stage two: Unfreeze the slab while splicing the per-cpu 3081 * freelist to the head of slab's freelist. 3082 */ 3083 do { 3084 old.freelist = READ_ONCE(slab->freelist); 3085 old.counters = READ_ONCE(slab->counters); 3086 VM_BUG_ON(!old.frozen); 3087 3088 /* Determine target state of the slab */ 3089 new.counters = old.counters; 3090 new.frozen = 0; 3091 if (freelist_tail) { 3092 new.inuse -= free_delta; 3093 set_freepointer(s, freelist_tail, old.freelist); 3094 new.freelist = freelist; 3095 } else { 3096 new.freelist = old.freelist; 3097 } 3098 } while (!slab_update_freelist(s, slab, 3099 old.freelist, old.counters, 3100 new.freelist, new.counters, 3101 "unfreezing slab")); 3102 3103 /* 3104 * Stage three: Manipulate the slab list based on the updated state.
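* An empty slab may be discarded, a slab with remaining free objects goes back on the node partial list, and a fully allocated slab is left off the lists entirely (it is only tracked when debugging is enabled).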
3105 */ 3106 if (!new.inuse && n->nr_partial >= s->min_partial) { 3107 stat(s, DEACTIVATE_EMPTY); 3108 discard_slab(s, slab); 3109 stat(s, FREE_SLAB); 3110 } else if (new.freelist) { 3111 spin_lock_irqsave(&n->list_lock, flags); 3112 add_partial(n, slab, tail); 3113 spin_unlock_irqrestore(&n->list_lock, flags); 3114 stat(s, tail); 3115 } else { 3116 stat(s, DEACTIVATE_FULL); 3117 } 3118 } 3119 3120 #ifdef CONFIG_SLUB_CPU_PARTIAL 3121 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) 3122 { 3123 struct kmem_cache_node *n = NULL, *n2 = NULL; 3124 struct slab *slab, *slab_to_discard = NULL; 3125 unsigned long flags = 0; 3126 3127 while (partial_slab) { 3128 slab = partial_slab; 3129 partial_slab = slab->next; 3130 3131 n2 = get_node(s, slab_nid(slab)); 3132 if (n != n2) { 3133 if (n) 3134 spin_unlock_irqrestore(&n->list_lock, flags); 3135 3136 n = n2; 3137 spin_lock_irqsave(&n->list_lock, flags); 3138 } 3139 3140 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { 3141 slab->next = slab_to_discard; 3142 slab_to_discard = slab; 3143 } else { 3144 add_partial(n, slab, DEACTIVATE_TO_TAIL); 3145 stat(s, FREE_ADD_PARTIAL); 3146 } 3147 } 3148 3149 if (n) 3150 spin_unlock_irqrestore(&n->list_lock, flags); 3151 3152 while (slab_to_discard) { 3153 slab = slab_to_discard; 3154 slab_to_discard = slab_to_discard->next; 3155 3156 stat(s, DEACTIVATE_EMPTY); 3157 discard_slab(s, slab); 3158 stat(s, FREE_SLAB); 3159 } 3160 } 3161 3162 /* 3163 * Put all the cpu partial slabs to the node partial list. 3164 */ 3165 static void put_partials(struct kmem_cache *s) 3166 { 3167 struct slab *partial_slab; 3168 unsigned long flags; 3169 3170 local_lock_irqsave(&s->cpu_slab->lock, flags); 3171 partial_slab = this_cpu_read(s->cpu_slab->partial); 3172 this_cpu_write(s->cpu_slab->partial, NULL); 3173 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3174 3175 if (partial_slab) 3176 __put_partials(s, partial_slab); 3177 } 3178 3179 static void put_partials_cpu(struct kmem_cache *s, 3180 struct kmem_cache_cpu *c) 3181 { 3182 struct slab *partial_slab; 3183 3184 partial_slab = slub_percpu_partial(c); 3185 c->partial = NULL; 3186 3187 if (partial_slab) 3188 __put_partials(s, partial_slab); 3189 } 3190 3191 /* 3192 * Put a slab into a partial slab slot if available. 3193 * 3194 * If we did not find a slot then simply move all the partials to the 3195 * per node partial list. 3196 */ 3197 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 3198 { 3199 struct slab *oldslab; 3200 struct slab *slab_to_put = NULL; 3201 unsigned long flags; 3202 int slabs = 0; 3203 3204 local_lock_irqsave(&s->cpu_slab->lock, flags); 3205 3206 oldslab = this_cpu_read(s->cpu_slab->partial); 3207 3208 if (oldslab) { 3209 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 3210 /* 3211 * Partial array is full. Move the existing set to the 3212 * per node partial list. Postpone the actual unfreezing 3213 * outside of the critical section. 
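* (The postponed work is performed by __put_partials() below, after the local lock has been dropped.)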
3214 */ 3215 slab_to_put = oldslab; 3216 oldslab = NULL; 3217 } else { 3218 slabs = oldslab->slabs; 3219 } 3220 } 3221 3222 slabs++; 3223 3224 slab->slabs = slabs; 3225 slab->next = oldslab; 3226 3227 this_cpu_write(s->cpu_slab->partial, slab); 3228 3229 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3230 3231 if (slab_to_put) { 3232 __put_partials(s, slab_to_put); 3233 stat(s, CPU_PARTIAL_DRAIN); 3234 } 3235 } 3236 3237 #else /* CONFIG_SLUB_CPU_PARTIAL */ 3238 3239 static inline void put_partials(struct kmem_cache *s) { } 3240 static inline void put_partials_cpu(struct kmem_cache *s, 3241 struct kmem_cache_cpu *c) { } 3242 3243 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 3244 3245 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 3246 { 3247 unsigned long flags; 3248 struct slab *slab; 3249 void *freelist; 3250 3251 local_lock_irqsave(&s->cpu_slab->lock, flags); 3252 3253 slab = c->slab; 3254 freelist = c->freelist; 3255 3256 c->slab = NULL; 3257 c->freelist = NULL; 3258 c->tid = next_tid(c->tid); 3259 3260 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3261 3262 if (slab) { 3263 deactivate_slab(s, slab, freelist); 3264 stat(s, CPUSLAB_FLUSH); 3265 } 3266 } 3267 3268 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 3269 { 3270 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3271 void *freelist = c->freelist; 3272 struct slab *slab = c->slab; 3273 3274 c->slab = NULL; 3275 c->freelist = NULL; 3276 c->tid = next_tid(c->tid); 3277 3278 if (slab) { 3279 deactivate_slab(s, slab, freelist); 3280 stat(s, CPUSLAB_FLUSH); 3281 } 3282 3283 put_partials_cpu(s, c); 3284 } 3285 3286 struct slub_flush_work { 3287 struct work_struct work; 3288 struct kmem_cache *s; 3289 bool skip; 3290 }; 3291 3292 /* 3293 * Flush cpu slab. 3294 * 3295 * Called from CPU work handler with migration disabled. 3296 */ 3297 static void flush_cpu_slab(struct work_struct *w) 3298 { 3299 struct kmem_cache *s; 3300 struct kmem_cache_cpu *c; 3301 struct slub_flush_work *sfw; 3302 3303 sfw = container_of(w, struct slub_flush_work, work); 3304 3305 s = sfw->s; 3306 c = this_cpu_ptr(s->cpu_slab); 3307 3308 if (c->slab) 3309 flush_slab(s, c); 3310 3311 put_partials(s); 3312 } 3313 3314 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 3315 { 3316 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3317 3318 return c->slab || slub_percpu_partial(c); 3319 } 3320 3321 static DEFINE_MUTEX(flush_lock); 3322 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush); 3323 3324 static void flush_all_cpus_locked(struct kmem_cache *s) 3325 { 3326 struct slub_flush_work *sfw; 3327 unsigned int cpu; 3328 3329 lockdep_assert_cpus_held(); 3330 mutex_lock(&flush_lock); 3331 3332 for_each_online_cpu(cpu) { 3333 sfw = &per_cpu(slub_flush, cpu); 3334 if (!has_cpu_slab(cpu, s)) { 3335 sfw->skip = true; 3336 continue; 3337 } 3338 INIT_WORK(&sfw->work, flush_cpu_slab); 3339 sfw->skip = false; 3340 sfw->s = s; 3341 queue_work_on(cpu, flushwq, &sfw->work); 3342 } 3343 3344 for_each_online_cpu(cpu) { 3345 sfw = &per_cpu(slub_flush, cpu); 3346 if (sfw->skip) 3347 continue; 3348 flush_work(&sfw->work); 3349 } 3350 3351 mutex_unlock(&flush_lock); 3352 } 3353 3354 static void flush_all(struct kmem_cache *s) 3355 { 3356 cpus_read_lock(); 3357 flush_all_cpus_locked(s); 3358 cpus_read_unlock(); 3359 } 3360 3361 /* 3362 * Use the cpu notifier to ensure that the cpu slabs are flushed when 3363 * necessary.
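* slub_cpu_dead() below runs when a cpu goes offline and deactivates that cpu's slabs back onto the node lists for every cache.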
3364 */ 3365 static int slub_cpu_dead(unsigned int cpu) 3366 { 3367 struct kmem_cache *s; 3368 3369 mutex_lock(&slab_mutex); 3370 list_for_each_entry(s, &slab_caches, list) 3371 __flush_cpu_slab(s, cpu); 3372 mutex_unlock(&slab_mutex); 3373 return 0; 3374 } 3375 3376 #else /* CONFIG_SLUB_TINY */ 3377 static inline void flush_all_cpus_locked(struct kmem_cache *s) { } 3378 static inline void flush_all(struct kmem_cache *s) { } 3379 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } 3380 static inline int slub_cpu_dead(unsigned int cpu) { return 0; } 3381 #endif /* CONFIG_SLUB_TINY */ 3382 3383 /* 3384 * Check if the objects in a per cpu structure fit numa 3385 * locality expectations. 3386 */ 3387 static inline int node_match(struct slab *slab, int node) 3388 { 3389 #ifdef CONFIG_NUMA 3390 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 3391 return 0; 3392 #endif 3393 return 1; 3394 } 3395 3396 #ifdef CONFIG_SLUB_DEBUG 3397 static int count_free(struct slab *slab) 3398 { 3399 return slab->objects - slab->inuse; 3400 } 3401 3402 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 3403 { 3404 return atomic_long_read(&n->total_objects); 3405 } 3406 3407 /* Supports checking bulk free of a constructed freelist */ 3408 static inline bool free_debug_processing(struct kmem_cache *s, 3409 struct slab *slab, void *head, void *tail, int *bulk_cnt, 3410 unsigned long addr, depot_stack_handle_t handle) 3411 { 3412 bool checks_ok = false; 3413 void *object = head; 3414 int cnt = 0; 3415 3416 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3417 if (!check_slab(s, slab)) 3418 goto out; 3419 } 3420 3421 if (slab->inuse < *bulk_cnt) { 3422 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", 3423 slab->inuse, *bulk_cnt); 3424 goto out; 3425 } 3426 3427 next_object: 3428 3429 if (++cnt > *bulk_cnt) 3430 goto out_cnt; 3431 3432 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3433 if (!free_consistency_checks(s, slab, object, addr)) 3434 goto out; 3435 } 3436 3437 if (s->flags & SLAB_STORE_USER) 3438 set_track_update(s, object, TRACK_FREE, addr, handle); 3439 trace(s, slab, object, 0); 3440 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 3441 init_object(s, object, SLUB_RED_INACTIVE); 3442 3443 /* Reached end of constructed freelist yet? 
*/ 3444 if (object != tail) { 3445 object = get_freepointer(s, object); 3446 goto next_object; 3447 } 3448 checks_ok = true; 3449 3450 out_cnt: 3451 if (cnt != *bulk_cnt) { 3452 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", 3453 *bulk_cnt, cnt); 3454 *bulk_cnt = cnt; 3455 } 3456 3457 out: 3458 3459 if (!checks_ok) 3460 slab_fix(s, "Object at 0x%p not freed", object); 3461 3462 return checks_ok; 3463 } 3464 #endif /* CONFIG_SLUB_DEBUG */ 3465 3466 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) 3467 static unsigned long count_partial(struct kmem_cache_node *n, 3468 int (*get_count)(struct slab *)) 3469 { 3470 unsigned long flags; 3471 unsigned long x = 0; 3472 struct slab *slab; 3473 3474 spin_lock_irqsave(&n->list_lock, flags); 3475 list_for_each_entry(slab, &n->partial, slab_list) 3476 x += get_count(slab); 3477 spin_unlock_irqrestore(&n->list_lock, flags); 3478 return x; 3479 } 3480 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ 3481 3482 #ifdef CONFIG_SLUB_DEBUG 3483 #define MAX_PARTIAL_TO_SCAN 10000 3484 3485 static unsigned long count_partial_free_approx(struct kmem_cache_node *n) 3486 { 3487 unsigned long flags; 3488 unsigned long x = 0; 3489 struct slab *slab; 3490 3491 spin_lock_irqsave(&n->list_lock, flags); 3492 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) { 3493 list_for_each_entry(slab, &n->partial, slab_list) 3494 x += slab->objects - slab->inuse; 3495 } else { 3496 /* 3497 * For a long list, approximate the total count of objects in 3498 * it to meet the limit on the number of slabs to scan. 3499 * Scan from both the list's head and tail for better accuracy. 3500 */ 3501 unsigned long scanned = 0; 3502 3503 list_for_each_entry(slab, &n->partial, slab_list) { 3504 x += slab->objects - slab->inuse; 3505 if (++scanned == MAX_PARTIAL_TO_SCAN / 2) 3506 break; 3507 } 3508 list_for_each_entry_reverse(slab, &n->partial, slab_list) { 3509 x += slab->objects - slab->inuse; 3510 if (++scanned == MAX_PARTIAL_TO_SCAN) 3511 break; 3512 } 3513 x = mult_frac(x, n->nr_partial, scanned); 3514 x = min(x, node_nr_objs(n)); 3515 } 3516 spin_unlock_irqrestore(&n->list_lock, flags); 3517 return x; 3518 } 3519 3520 static noinline void 3521 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 3522 { 3523 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 3524 DEFAULT_RATELIMIT_BURST); 3525 int cpu = raw_smp_processor_id(); 3526 int node; 3527 struct kmem_cache_node *n; 3528 3529 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 3530 return; 3531 3532 pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n", 3533 cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags); 3534 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 3535 s->name, s->object_size, s->size, oo_order(s->oo), 3536 oo_order(s->min)); 3537 3538 if (oo_order(s->min) > get_order(s->object_size)) 3539 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n", 3540 s->name); 3541 3542 for_each_kmem_cache_node(s, node, n) { 3543 unsigned long nr_slabs; 3544 unsigned long nr_objs; 3545 unsigned long nr_free; 3546 3547 nr_free = count_partial_free_approx(n); 3548 nr_slabs = node_nr_slabs(n); 3549 nr_objs = node_nr_objs(n); 3550 3551 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 3552 node, nr_slabs, nr_objs, nr_free); 3553 } 3554 } 3555 #else /* CONFIG_SLUB_DEBUG */ 3556 static inline void 3557 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } 
3558 #endif 3559 3560 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) 3561 { 3562 if (unlikely(slab_test_pfmemalloc(slab))) 3563 return gfp_pfmemalloc_allowed(gfpflags); 3564 3565 return true; 3566 } 3567 3568 #ifndef CONFIG_SLUB_TINY 3569 static inline bool 3570 __update_cpu_freelist_fast(struct kmem_cache *s, 3571 void *freelist_old, void *freelist_new, 3572 unsigned long tid) 3573 { 3574 freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; 3575 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; 3576 3577 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, 3578 &old.full, new.full); 3579 } 3580 3581 /* 3582 * Check the slab->freelist and either transfer the freelist to the 3583 * per cpu freelist or deactivate the slab. 3584 * 3585 * The slab is still frozen if the return value is not NULL. 3586 * 3587 * If this function returns NULL then the slab has been unfrozen. 3588 */ 3589 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 3590 { 3591 struct slab new; 3592 unsigned long counters; 3593 void *freelist; 3594 3595 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3596 3597 do { 3598 freelist = slab->freelist; 3599 counters = slab->counters; 3600 3601 new.counters = counters; 3602 3603 new.inuse = slab->objects; 3604 new.frozen = freelist != NULL; 3605 3606 } while (!__slab_update_freelist(s, slab, 3607 freelist, counters, 3608 NULL, new.counters, 3609 "get_freelist")); 3610 3611 return freelist; 3612 } 3613 3614 /* 3615 * Freeze the partial slab and return the pointer to the freelist. 3616 */ 3617 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) 3618 { 3619 struct slab new; 3620 unsigned long counters; 3621 void *freelist; 3622 3623 do { 3624 freelist = slab->freelist; 3625 counters = slab->counters; 3626 3627 new.counters = counters; 3628 VM_BUG_ON(new.frozen); 3629 3630 new.inuse = slab->objects; 3631 new.frozen = 1; 3632 3633 } while (!slab_update_freelist(s, slab, 3634 freelist, counters, 3635 NULL, new.counters, 3636 "freeze_slab")); 3637 3638 return freelist; 3639 } 3640 3641 /* 3642 * Slow path. The lockless freelist is empty or we need to perform 3643 * debugging duties. 3644 * 3645 * Processing is still very fast if new objects have been freed to the 3646 * regular freelist. In that case we simply take over the regular freelist 3647 * as the lockless freelist and zap the regular freelist. 3648 * 3649 * If that is not working then we fall back to the partial lists. We take the 3650 * first element of the freelist as the object to allocate now and move the 3651 * rest of the freelist to the lockless freelist. 3652 * 3653 * And if we were unable to get a new slab from the partial slab lists then 3654 * we need to allocate a new slab. This is the slowest path since it involves 3655 * a call to the page allocator and the setup of a new slab. 3656 * 3657 * Version of __slab_alloc to use when we know that preemption is 3658 * already disabled (which is the case for bulk allocation). 
3659 */ 3660 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3661 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3662 { 3663 void *freelist; 3664 struct slab *slab; 3665 unsigned long flags; 3666 struct partial_context pc; 3667 bool try_thisnode = true; 3668 3669 stat(s, ALLOC_SLOWPATH); 3670 3671 reread_slab: 3672 3673 slab = READ_ONCE(c->slab); 3674 if (!slab) { 3675 /* 3676 * if the node is not online or has no normal memory, just 3677 * ignore the node constraint 3678 */ 3679 if (unlikely(node != NUMA_NO_NODE && 3680 !node_isset(node, slab_nodes))) 3681 node = NUMA_NO_NODE; 3682 goto new_slab; 3683 } 3684 3685 if (unlikely(!node_match(slab, node))) { 3686 /* 3687 * same as above but node_match() being false already 3688 * implies node != NUMA_NO_NODE 3689 */ 3690 if (!node_isset(node, slab_nodes)) { 3691 node = NUMA_NO_NODE; 3692 } else { 3693 stat(s, ALLOC_NODE_MISMATCH); 3694 goto deactivate_slab; 3695 } 3696 } 3697 3698 /* 3699 * By rights, we should be searching for a slab page that was 3700 * PFMEMALLOC but right now, we are losing the pfmemalloc 3701 * information when the page leaves the per-cpu allocator 3702 */ 3703 if (unlikely(!pfmemalloc_match(slab, gfpflags))) 3704 goto deactivate_slab; 3705 3706 /* must check again c->slab in case we got preempted and it changed */ 3707 local_lock_irqsave(&s->cpu_slab->lock, flags); 3708 if (unlikely(slab != c->slab)) { 3709 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3710 goto reread_slab; 3711 } 3712 freelist = c->freelist; 3713 if (freelist) 3714 goto load_freelist; 3715 3716 freelist = get_freelist(s, slab); 3717 3718 if (!freelist) { 3719 c->slab = NULL; 3720 c->tid = next_tid(c->tid); 3721 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3722 stat(s, DEACTIVATE_BYPASS); 3723 goto new_slab; 3724 } 3725 3726 stat(s, ALLOC_REFILL); 3727 3728 load_freelist: 3729 3730 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3731 3732 /* 3733 * freelist is pointing to the list of objects to be used. 3734 * slab is pointing to the slab from which the objects are obtained. 3735 * That slab must be frozen for per cpu allocations to work. 
3736 */ 3737 VM_BUG_ON(!c->slab->frozen); 3738 c->freelist = get_freepointer(s, freelist); 3739 c->tid = next_tid(c->tid); 3740 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3741 return freelist; 3742 3743 deactivate_slab: 3744 3745 local_lock_irqsave(&s->cpu_slab->lock, flags); 3746 if (slab != c->slab) { 3747 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3748 goto reread_slab; 3749 } 3750 freelist = c->freelist; 3751 c->slab = NULL; 3752 c->freelist = NULL; 3753 c->tid = next_tid(c->tid); 3754 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3755 deactivate_slab(s, slab, freelist); 3756 3757 new_slab: 3758 3759 #ifdef CONFIG_SLUB_CPU_PARTIAL 3760 while (slub_percpu_partial(c)) { 3761 local_lock_irqsave(&s->cpu_slab->lock, flags); 3762 if (unlikely(c->slab)) { 3763 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3764 goto reread_slab; 3765 } 3766 if (unlikely(!slub_percpu_partial(c))) { 3767 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3768 /* we were preempted and the partial list became empty */ 3769 goto new_objects; 3770 } 3771 3772 slab = slub_percpu_partial(c); 3773 slub_set_percpu_partial(c, slab); 3774 3775 if (likely(node_match(slab, node) && 3776 pfmemalloc_match(slab, gfpflags))) { 3777 c->slab = slab; 3778 freelist = get_freelist(s, slab); 3779 VM_BUG_ON(!freelist); 3780 stat(s, CPU_PARTIAL_ALLOC); 3781 goto load_freelist; 3782 } 3783 3784 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3785 3786 slab->next = NULL; 3787 __put_partials(s, slab); 3788 } 3789 #endif 3790 3791 new_objects: 3792 3793 pc.flags = gfpflags; 3794 /* 3795 * When a preferred node is indicated but __GFP_THISNODE is not set: 3796 * 3797 * 1) try to get a partial slab from the target node only, by having 3798 * __GFP_THISNODE in pc.flags for get_partial() 3799 * 2) if 1) failed, try to allocate a new slab from the target node with 3800 * GFP_NOWAIT | __GFP_THISNODE opportunistically 3801 * 3) if 2) failed, retry with the original gfpflags, which allows 3802 * get_partial() to try the partial lists of other nodes before 3803 * potentially allocating a new page from other nodes 3804 */ 3805 if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 3806 && try_thisnode)) 3807 pc.flags = GFP_NOWAIT | __GFP_THISNODE; 3808 3809 pc.orig_size = orig_size; 3810 slab = get_partial(s, node, &pc); 3811 if (slab) { 3812 if (kmem_cache_debug(s)) { 3813 freelist = pc.object; 3814 /* 3815 * For debug caches we had to go through 3816 * alloc_single_from_partial() here, so just store the 3817 * tracking info and return the object.
3818 */ 3819 if (s->flags & SLAB_STORE_USER) 3820 set_track(s, freelist, TRACK_ALLOC, addr); 3821 3822 return freelist; 3823 } 3824 3825 freelist = freeze_slab(s, slab); 3826 goto retry_load_slab; 3827 } 3828 3829 slub_put_cpu_ptr(s->cpu_slab); 3830 slab = new_slab(s, pc.flags, node); 3831 c = slub_get_cpu_ptr(s->cpu_slab); 3832 3833 if (unlikely(!slab)) { 3834 if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 3835 && try_thisnode) { 3836 try_thisnode = false; 3837 goto new_objects; 3838 } 3839 slab_out_of_memory(s, gfpflags, node); 3840 return NULL; 3841 } 3842 3843 stat(s, ALLOC_SLAB); 3844 3845 if (kmem_cache_debug(s)) { 3846 freelist = alloc_single_from_new_slab(s, slab, orig_size); 3847 3848 if (unlikely(!freelist)) 3849 goto new_objects; 3850 3851 if (s->flags & SLAB_STORE_USER) 3852 set_track(s, freelist, TRACK_ALLOC, addr); 3853 3854 return freelist; 3855 } 3856 3857 /* 3858 * No other reference to the slab yet so we can 3859 * muck around with it freely without cmpxchg 3860 */ 3861 freelist = slab->freelist; 3862 slab->freelist = NULL; 3863 slab->inuse = slab->objects; 3864 slab->frozen = 1; 3865 3866 inc_slabs_node(s, slab_nid(slab), slab->objects); 3867 3868 if (unlikely(!pfmemalloc_match(slab, gfpflags))) { 3869 /* 3870 * For !pfmemalloc_match() case we don't load freelist so that 3871 * we don't make further mismatched allocations easier. 3872 */ 3873 deactivate_slab(s, slab, get_freepointer(s, freelist)); 3874 return freelist; 3875 } 3876 3877 retry_load_slab: 3878 3879 local_lock_irqsave(&s->cpu_slab->lock, flags); 3880 if (unlikely(c->slab)) { 3881 void *flush_freelist = c->freelist; 3882 struct slab *flush_slab = c->slab; 3883 3884 c->slab = NULL; 3885 c->freelist = NULL; 3886 c->tid = next_tid(c->tid); 3887 3888 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3889 3890 deactivate_slab(s, flush_slab, flush_freelist); 3891 3892 stat(s, CPUSLAB_FLUSH); 3893 3894 goto retry_load_slab; 3895 } 3896 c->slab = slab; 3897 3898 goto load_freelist; 3899 } 3900 3901 /* 3902 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 3903 * disabled. Compensates for possible cpu changes by refetching the per cpu area 3904 * pointer. 3905 */ 3906 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3907 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3908 { 3909 void *p; 3910 3911 #ifdef CONFIG_PREEMPT_COUNT 3912 /* 3913 * We may have been preempted and rescheduled on a different 3914 * cpu before disabling preemption. Need to reload cpu area 3915 * pointer. 3916 */ 3917 c = slub_get_cpu_ptr(s->cpu_slab); 3918 #endif 3919 3920 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); 3921 #ifdef CONFIG_PREEMPT_COUNT 3922 slub_put_cpu_ptr(s->cpu_slab); 3923 #endif 3924 return p; 3925 } 3926 3927 static __always_inline void *__slab_alloc_node(struct kmem_cache *s, 3928 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3929 { 3930 struct kmem_cache_cpu *c; 3931 struct slab *slab; 3932 unsigned long tid; 3933 void *object; 3934 3935 redo: 3936 /* 3937 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 3938 * enabled. We may switch back and forth between cpus while 3939 * reading from one cpu area. That does not matter as long 3940 * as we end up on the original cpu again when doing the cmpxchg. 3941 * 3942 * We must guarantee that tid and kmem_cache_cpu are retrieved on the 3943 * same cpu. We read first the kmem_cache_cpu pointer and use it to read 3944 * the tid. 
If we are preempted and switched to another cpu between the 3945 * two reads, it's OK as the two are still associated with the same cpu 3946 * and cmpxchg later will validate the cpu. 3947 */ 3948 c = raw_cpu_ptr(s->cpu_slab); 3949 tid = READ_ONCE(c->tid); 3950 3951 /* 3952 * The irqless object alloc/free algorithm used here depends on the 3953 * sequence of fetching cpu_slab's data. tid must be fetched before anything 3954 * else on c, to guarantee that the object and slab associated with the previous 3955 * tid won't be used with the current tid. If we fetched tid first, the object 3956 * and slab could be the ones associated with the next tid and our alloc/free 3957 * request would fail. In that case we simply retry, so this is not a problem. 3958 */ 3959 barrier(); 3960 3961 /* 3962 * The transaction ids are globally unique per cpu and per operation on 3963 * a per cpu queue. Thus they guarantee that the cmpxchg_double 3964 * occurs on the right processor and that there was no operation on the 3965 * linked list in between. 3966 */ 3967 3968 object = c->freelist; 3969 slab = c->slab; 3970 3971 #ifdef CONFIG_NUMA 3972 if (static_branch_unlikely(&strict_numa) && 3973 node == NUMA_NO_NODE) { 3974 3975 struct mempolicy *mpol = current->mempolicy; 3976 3977 if (mpol) { 3978 /* 3979 * Special BIND rule support. If the existing slab 3980 * is in the permitted set then do not redirect 3981 * to a particular node. 3982 * Otherwise we apply the memory policy to get 3983 * the node we need to allocate on. 3984 */ 3985 if (mpol->mode != MPOL_BIND || !slab || 3986 !node_isset(slab_nid(slab), mpol->nodes)) 3987 3988 node = mempolicy_slab_node(); 3989 } 3990 } 3991 #endif 3992 3993 if (!USE_LOCKLESS_FAST_PATH() || 3994 unlikely(!object || !slab || !node_match(slab, node))) { 3995 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); 3996 } else { 3997 void *next_object = get_freepointer_safe(s, object); 3998 3999 /* 4000 * The cmpxchg will only match if there was no additional 4001 * operation and if we are on the right processor. 4002 * 4003 * The cmpxchg does the following atomically (without lock 4004 * semantics!) 4005 * 1. Relocate the first pointer to the current per cpu area. 4006 * 2. Verify that tid and freelist have not been changed 4007 * 3. If they were not changed replace tid and freelist 4008 * 4009 * Since this is without lock semantics the protection is only 4010 * against code executing on this cpu *not* from access by 4011 * other cpus. 4012 */ 4013 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { 4014 note_cmpxchg_failure("slab_alloc", s, tid); 4015 goto redo; 4016 } 4017 prefetch_freepointer(s, next_object); 4018 stat(s, ALLOC_FASTPATH); 4019 } 4020 4021 return object; 4022 } 4023 #else /* CONFIG_SLUB_TINY */ 4024 static void *__slab_alloc_node(struct kmem_cache *s, 4025 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 4026 { 4027 struct partial_context pc; 4028 struct slab *slab; 4029 void *object; 4030 4031 pc.flags = gfpflags; 4032 pc.orig_size = orig_size; 4033 slab = get_partial(s, node, &pc); 4034 4035 if (slab) 4036 return pc.object; 4037 4038 slab = new_slab(s, gfpflags, node); 4039 if (unlikely(!slab)) { 4040 slab_out_of_memory(s, gfpflags, node); 4041 return NULL; 4042 } 4043 4044 object = alloc_single_from_new_slab(s, slab, orig_size); 4045 4046 return object; 4047 } 4048 #endif /* CONFIG_SLUB_TINY */ 4049
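/*
 * Editorial aside, not part of slub.c: the tid scheme used by the lockless
 * fastpaths above can be illustrated with a userspace analog. This is a
 * minimal sketch in C11 atomics (all names are hypothetical); a wide
 * compare-exchange on {head, tid} plays the role that cmpxchg_double plays
 * for {c->freelist, c->tid}.
 */
#include <stdatomic.h>
#include <stddef.h>

struct obj { struct obj *next; };

struct tagged_head {
	struct obj *head;	/* analog of c->freelist */
	unsigned long tid;	/* analog of c->tid, bumped by every update */
};

static _Atomic struct tagged_head freelist_head;

static struct obj *lockless_pop(void)
{
	struct tagged_head old, new;

	old = atomic_load(&freelist_head);
	do {
		if (!old.head)
			return NULL;
		/*
		 * NB: a real implementation must dereference old.head->next
		 * defensively, since another thread may have taken the object
		 * meanwhile; SLUB uses get_freepointer_safe() for this.
		 */
		new.head = old.head->next;
		new.tid = old.tid + 1;
		/* Fails, and reloads 'old', if head or tid changed meanwhile. */
	} while (!atomic_compare_exchange_weak(&freelist_head, &old, new));

	return old.head;
}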
4050 /* 4051 * If the object has been wiped upon free, make sure it's fully initialized by 4052 * zeroing out the freelist pointer. 4053 * 4054 * Note that we also wipe custom freelist pointers. 4055 */ 4056 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 4057 void *obj) 4058 { 4059 if (unlikely(slab_want_init_on_free(s)) && obj && 4060 !freeptr_outside_object(s)) 4061 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 4062 0, sizeof(void *)); 4063 } 4064 4065 static __fastpath_inline 4066 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 4067 { 4068 flags &= gfp_allowed_mask; 4069 4070 might_alloc(flags); 4071 4072 if (unlikely(should_failslab(s, flags))) 4073 return NULL; 4074 4075 return s; 4076 } 4077 4078 static __fastpath_inline 4079 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 4080 gfp_t flags, size_t size, void **p, bool init, 4081 unsigned int orig_size) 4082 { 4083 unsigned int zero_size = s->object_size; 4084 bool kasan_init = init; 4085 size_t i; 4086 gfp_t init_flags = flags & gfp_allowed_mask; 4087 4088 /* 4089 * For a kmalloc object, the allocated memory size (object_size) is likely 4090 * larger than the requested size (orig_size). If redzone checking is 4091 * enabled for the extra space, don't zero it, as it will be redzoned 4092 * soon. The redzone operation for this extra space could be seen as a 4093 * replacement for the current poisoning under certain debug options, and 4094 * won't break other sanity checks. 4095 */ 4096 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) && 4097 (s->flags & SLAB_KMALLOC)) 4098 zero_size = orig_size; 4099 4100 /* 4101 * When slab_debug is enabled, avoid memory initialization integrated 4102 * into KASAN and instead zero out the memory via the memset below with 4103 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and 4104 * cause false-positive reports. This does not lead to a performance 4105 * penalty on production builds, as slab_debug is not intended to be 4106 * enabled there. 4107 */ 4108 if (__slub_debug_enabled()) 4109 kasan_init = false; 4110 4111 /* 4112 * As memory initialization might be integrated into KASAN, 4113 * kasan_slab_alloc and initialization memset must be 4114 * kept together to avoid discrepancies in behavior. 4115 * 4116 * As p[i] might get tagged, memset and kmemleak hook come after KASAN. 4117 */ 4118 for (i = 0; i < size; i++) { 4119 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init); 4120 if (p[i] && init && (!kasan_init || 4121 !kasan_has_integrated_init())) 4122 memset(p[i], 0, zero_size); 4123 kmemleak_alloc_recursive(p[i], s->object_size, 1, 4124 s->flags, init_flags); 4125 kmsan_slab_alloc(s, p[i], init_flags); 4126 alloc_tagging_slab_alloc_hook(s, p[i], flags); 4127 } 4128 4129 return memcg_slab_post_alloc_hook(s, lru, flags, size, p); 4130 } 4131 4132 /* 4133 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 4134 * have the fastpath folded into their functions. So no function call 4135 * overhead for requests that can be satisfied on the fastpath. 4136 * 4137 * The fastpath works by first checking if the lockless freelist can be used. 4138 * If not then __slab_alloc is called for slow processing. 4139 * 4140 * Otherwise we can simply pick the next object from the lockless free list.
4141 */ 4142 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, 4143 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 4144 { 4145 void *object; 4146 bool init = false; 4147 4148 s = slab_pre_alloc_hook(s, gfpflags); 4149 if (unlikely(!s)) 4150 return NULL; 4151 4152 object = kfence_alloc(s, orig_size, gfpflags); 4153 if (unlikely(object)) 4154 goto out; 4155 4156 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); 4157 4158 maybe_wipe_obj_freeptr(s, object); 4159 init = slab_want_init_on_alloc(gfpflags, s); 4160 4161 out: 4162 /* 4163 * When init equals 'true', as for the kzalloc() family, only 4164 * @orig_size bytes might be zeroed instead of s->object_size. 4165 * In case this fails due to memcg_slab_post_alloc_hook(), 4166 * object is set to NULL. 4167 */ 4168 slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size); 4169 4170 return object; 4171 } 4172 4173 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags) 4174 { 4175 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, 4176 s->object_size); 4177 4178 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4179 4180 return ret; 4181 } 4182 EXPORT_SYMBOL(kmem_cache_alloc_noprof); 4183 4184 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, 4185 gfp_t gfpflags) 4186 { 4187 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, 4188 s->object_size); 4189 4190 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4191 4192 return ret; 4193 } 4194 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof); 4195 4196 bool kmem_cache_charge(void *objp, gfp_t gfpflags) 4197 { 4198 if (!memcg_kmem_online()) 4199 return true; 4200 4201 return memcg_slab_post_charge(objp, gfpflags); 4202 } 4203 EXPORT_SYMBOL(kmem_cache_charge); 4204 4205 /** 4206 * kmem_cache_alloc_node - Allocate an object on the specified node 4207 * @s: The cache to allocate from. 4208 * @gfpflags: See kmalloc(). 4209 * @node: node number of the target node. 4210 * 4211 * Identical to kmem_cache_alloc but it will allocate memory on the given 4212 * node, which can improve the performance for cpu bound structures. 4213 * 4214 * Falling back to another node is possible if __GFP_THISNODE is not set. 4215 * 4216 * Return: pointer to the new object or %NULL in case of error 4217 */ 4218 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node) 4219 { 4220 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); 4221 4222 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); 4223 4224 return ret; 4225 } 4226 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof); 4227
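/*
 * Editorial aside, not part of slub.c: a minimal usage sketch for the
 * node-aware API documented above. The cache, structure, and function
 * names are hypothetical; error handling is trimmed to the essentials.
 */
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_item {
	int payload;
};

static struct kmem_cache *demo_cache;

static int demo_alloc_on_node(int nid)
{
	struct demo_item *item;

	demo_cache = kmem_cache_create("demo_item", sizeof(struct demo_item),
				       0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/* Prefer node @nid; without __GFP_THISNODE this may fall back. */
	item = kmem_cache_alloc_node(demo_cache, GFP_KERNEL, nid);
	if (!item) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}

	item->payload = 1;
	kmem_cache_free(demo_cache, item);
	kmem_cache_destroy(demo_cache);
	return 0;
}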
4228 /* 4229 * To avoid unnecessary overhead, we pass through large allocation requests 4230 * directly to the page allocator. We use __GFP_COMP, because we will need to 4231 * know the allocation order to free the pages properly in kfree(). 4232 */ 4233 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node) 4234 { 4235 struct folio *folio; 4236 void *ptr = NULL; 4237 unsigned int order = get_order(size); 4238 4239 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 4240 flags = kmalloc_fix_flags(flags); 4241 4242 flags |= __GFP_COMP; 4243 folio = (struct folio *)alloc_pages_node_noprof(node, flags, order); 4244 if (folio) { 4245 ptr = folio_address(folio); 4246 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4247 PAGE_SIZE << order); 4248 } 4249 4250 ptr = kasan_kmalloc_large(ptr, size, flags); 4251 /* As ptr might get tagged, call kmemleak hook after KASAN. */ 4252 kmemleak_alloc(ptr, size, 1, flags); 4253 kmsan_kmalloc_large(ptr, size, flags); 4254 4255 return ptr; 4256 } 4257 4258 void *__kmalloc_large_noprof(size_t size, gfp_t flags) 4259 { 4260 void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE); 4261 4262 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4263 flags, NUMA_NO_NODE); 4264 return ret; 4265 } 4266 EXPORT_SYMBOL(__kmalloc_large_noprof); 4267 4268 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) 4269 { 4270 void *ret = ___kmalloc_large_node(size, flags, node); 4271 4272 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4273 flags, node); 4274 return ret; 4275 } 4276 EXPORT_SYMBOL(__kmalloc_large_node_noprof); 4277 4278 static __always_inline 4279 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node, 4280 unsigned long caller) 4281 { 4282 struct kmem_cache *s; 4283 void *ret; 4284 4285 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4286 ret = __kmalloc_large_node_noprof(size, flags, node); 4287 trace_kmalloc(caller, ret, size, 4288 PAGE_SIZE << get_order(size), flags, node); 4289 return ret; 4290 } 4291 4292 if (unlikely(!size)) 4293 return ZERO_SIZE_PTR; 4294 4295 s = kmalloc_slab(size, b, flags, caller); 4296 4297 ret = slab_alloc_node(s, NULL, flags, node, caller, size); 4298 ret = kasan_kmalloc(s, ret, size, flags); 4299 trace_kmalloc(caller, ret, size, s->size, flags, node); 4300 return ret; 4301 } 4302 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) 4303 { 4304 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_); 4305 } 4306 EXPORT_SYMBOL(__kmalloc_node_noprof); 4307 4308 void *__kmalloc_noprof(size_t size, gfp_t flags) 4309 { 4310 return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_); 4311 } 4312 EXPORT_SYMBOL(__kmalloc_noprof); 4313 4314 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, 4315 int node, unsigned long caller) 4316 { 4317 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller); 4318 4319 } 4320 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof); 4321 4322 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size) 4323 { 4324 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, 4325 _RET_IP_, size); 4326 4327 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); 4328 4329 ret = kasan_kmalloc(s, ret, size, gfpflags); 4330 return ret; 4331 } 4332 EXPORT_SYMBOL(__kmalloc_cache_noprof); 4333 4334 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags, 4335 int node, size_t size) 4336 { 4337 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); 4338 4339 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); 4340 4341 ret = kasan_kmalloc(s, ret, size, gfpflags); 4342 return ret; 4343 } 4344 EXPORT_SYMBOL(__kmalloc_cache_node_noprof); 4345
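/*
 * Editorial aside, not part of slub.c: as described above, sizes above
 * KMALLOC_MAX_CACHE_SIZE skip the kmalloc caches entirely and the returned
 * memory is a compound page from the page allocator. A hedged usage sketch:
 */
#include <linux/slab.h>
#include <linux/sizes.h>

static void demo_large_kmalloc(void)
{
	/*
	 * On common configurations SZ_1M exceeds KMALLOC_MAX_CACHE_SIZE,
	 * so this buffer is backed directly by the page allocator.
	 */
	void *buf = kmalloc(SZ_1M, GFP_KERNEL);

	if (!buf)
		return;

	/* Routed through the large-object path (see free_large_kmalloc()). */
	kfree(buf);
}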
4346 static noinline void free_to_partial_list( 4347 struct kmem_cache *s, struct slab *slab, 4348 void *head, void *tail, int bulk_cnt, 4349 unsigned long addr) 4350 { 4351 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 4352 struct slab *slab_free = NULL; 4353 int cnt = bulk_cnt; 4354 unsigned long flags; 4355 depot_stack_handle_t handle = 0; 4356 4357 if (s->flags & SLAB_STORE_USER) 4358 handle = set_track_prepare(); 4359 4360 spin_lock_irqsave(&n->list_lock, flags); 4361 4362 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { 4363 void *prior = slab->freelist; 4364 4365 /* Perform the actual freeing while we still hold the locks */ 4366 slab->inuse -= cnt; 4367 set_freepointer(s, tail, prior); 4368 slab->freelist = head; 4369 4370 /* 4371 * If the slab is empty, and the node's partial list is full, 4372 * it should be discarded regardless of whether it is on the 4373 * full or the partial list. 4374 */ 4375 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) 4376 slab_free = slab; 4377 4378 if (!prior) { 4379 /* was on full list */ 4380 remove_full(s, n, slab); 4381 if (!slab_free) { 4382 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4383 stat(s, FREE_ADD_PARTIAL); 4384 } 4385 } else if (slab_free) { 4386 remove_partial(n, slab); 4387 stat(s, FREE_REMOVE_PARTIAL); 4388 } 4389 } 4390 4391 if (slab_free) { 4392 /* 4393 * Update the counters while still holding n->list_lock to 4394 * prevent spurious validation warnings 4395 */ 4396 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); 4397 } 4398 4399 spin_unlock_irqrestore(&n->list_lock, flags); 4400 4401 if (slab_free) { 4402 stat(s, FREE_SLAB); 4403 free_slab(s, slab_free); 4404 } 4405 } 4406 4407 /* 4408 * Slow path handling. This may still be called frequently since objects 4409 * have a longer lifetime than the cpu slabs in most processing loads. 4410 * 4411 * So we still attempt to reduce cache line usage. Just take the slab 4412 * lock and free the item. If there is no additional partial slab 4413 * handling required then we can return immediately. 4414 */ 4415 static void __slab_free(struct kmem_cache *s, struct slab *slab, 4416 void *head, void *tail, int cnt, 4417 unsigned long addr) 4418 4419 { 4420 void *prior; 4421 int was_frozen; 4422 struct slab new; 4423 unsigned long counters; 4424 struct kmem_cache_node *n = NULL; 4425 unsigned long flags; 4426 bool on_node_partial; 4427 4428 stat(s, FREE_SLOWPATH); 4429 4430 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 4431 free_to_partial_list(s, slab, head, tail, cnt, addr); 4432 return; 4433 } 4434 4435 do { 4436 if (unlikely(n)) { 4437 spin_unlock_irqrestore(&n->list_lock, flags); 4438 n = NULL; 4439 } 4440 prior = slab->freelist; 4441 counters = slab->counters; 4442 set_freepointer(s, tail, prior); 4443 new.counters = counters; 4444 was_frozen = new.frozen; 4445 new.inuse -= cnt; 4446 if ((!new.inuse || !prior) && !was_frozen) { 4447 /* Needs to be taken off a list */ 4448 if (!kmem_cache_has_cpu_partial(s) || prior) { 4449 4450 n = get_node(s, slab_nid(slab)); 4451 /* 4452 * Speculatively acquire the list_lock. 4453 * If the cmpxchg does not succeed then we may 4454 * drop the list_lock without any processing. 4455 * 4456 * Otherwise the list_lock will synchronize with 4457 * other processors updating the list of slabs.
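* (Editorial note: for example, if another cpu frees into this slab concurrently, slab->counters changes, the slab_update_freelist() below fails, and the loop retries, releasing any speculatively taken list_lock at the top of the next iteration.)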
4458 */ 4459 spin_lock_irqsave(&n->list_lock, flags); 4460 4461 on_node_partial = slab_test_node_partial(slab); 4462 } 4463 } 4464 4465 } while (!slab_update_freelist(s, slab, 4466 prior, counters, 4467 head, new.counters, 4468 "__slab_free")); 4469 4470 if (likely(!n)) { 4471 4472 if (likely(was_frozen)) { 4473 /* 4474 * The list lock was not taken therefore no list 4475 * activity can be necessary. 4476 */ 4477 stat(s, FREE_FROZEN); 4478 } else if (kmem_cache_has_cpu_partial(s) && !prior) { 4479 /* 4480 * If we started with a full slab then put it onto the 4481 * per cpu partial list. 4482 */ 4483 put_cpu_partial(s, slab, 1); 4484 stat(s, CPU_PARTIAL_FREE); 4485 } 4486 4487 return; 4488 } 4489 4490 /* 4491 * This slab was partially empty but not on the per-node partial list, 4492 * in which case we shouldn't manipulate its list, just return. 4493 */ 4494 if (prior && !on_node_partial) { 4495 spin_unlock_irqrestore(&n->list_lock, flags); 4496 return; 4497 } 4498 4499 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 4500 goto slab_empty; 4501 4502 /* 4503 * Objects left in the slab. If it was not on the partial list before 4504 * then add it. 4505 */ 4506 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 4507 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4508 stat(s, FREE_ADD_PARTIAL); 4509 } 4510 spin_unlock_irqrestore(&n->list_lock, flags); 4511 return; 4512 4513 slab_empty: 4514 if (prior) { 4515 /* 4516 * Slab on the partial list. 4517 */ 4518 remove_partial(n, slab); 4519 stat(s, FREE_REMOVE_PARTIAL); 4520 } 4521 4522 spin_unlock_irqrestore(&n->list_lock, flags); 4523 stat(s, FREE_SLAB); 4524 discard_slab(s, slab); 4525 } 4526 4527 #ifndef CONFIG_SLUB_TINY 4528 /* 4529 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 4530 * can perform fastpath freeing without additional function calls. 4531 * 4532 * The fastpath is only possible if we are freeing to the current cpu slab 4533 * of this processor. This is typically the case if we have just allocated 4534 * the item before. 4535 * 4536 * If the fastpath is not possible then fall back to __slab_free where we deal 4537 * with all sorts of special processing. 4538 * 4539 * Bulk free of a freelist with several objects (all pointing to the 4540 * same slab) is possible by specifying head and tail pointers, plus an 4541 * object count (cnt). Bulk free is indicated by the tail pointer being set. 4542 */ 4543 static __always_inline void do_slab_free(struct kmem_cache *s, 4544 struct slab *slab, void *head, void *tail, 4545 int cnt, unsigned long addr) 4546 { 4547 struct kmem_cache_cpu *c; 4548 unsigned long tid; 4549 void **freelist; 4550 4551 redo: 4552 /* 4553 * Determine the current cpu's per cpu slab. 4554 * The cpu may change afterwards. However, that does not matter since 4555 * data is retrieved via this pointer. If we are on the same cpu 4556 * during the cmpxchg then the free will succeed.
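* (Editorial note: for example, if this task migrates to another cpu after 'c' and 'tid' are read, the tid passed to the cmpxchg below will not match the new cpu's tid and we simply retry there.)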
4557 */ 4558 c = raw_cpu_ptr(s->cpu_slab); 4559 tid = READ_ONCE(c->tid); 4560 4561 /* Same as the comment on barrier() in __slab_alloc_node() */ 4562 barrier(); 4563 4564 if (unlikely(slab != c->slab)) { 4565 __slab_free(s, slab, head, tail, cnt, addr); 4566 return; 4567 } 4568 4569 if (USE_LOCKLESS_FAST_PATH()) { 4570 freelist = READ_ONCE(c->freelist); 4571 4572 set_freepointer(s, tail, freelist); 4573 4574 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { 4575 note_cmpxchg_failure("slab_free", s, tid); 4576 goto redo; 4577 } 4578 } else { 4579 /* Update the free list under the local lock */ 4580 local_lock(&s->cpu_slab->lock); 4581 c = this_cpu_ptr(s->cpu_slab); 4582 if (unlikely(slab != c->slab)) { 4583 local_unlock(&s->cpu_slab->lock); 4584 goto redo; 4585 } 4586 tid = c->tid; 4587 freelist = c->freelist; 4588 4589 set_freepointer(s, tail, freelist); 4590 c->freelist = head; 4591 c->tid = next_tid(tid); 4592 4593 local_unlock(&s->cpu_slab->lock); 4594 } 4595 stat_add(s, FREE_FASTPATH, cnt); 4596 } 4597 #else /* CONFIG_SLUB_TINY */ 4598 static void do_slab_free(struct kmem_cache *s, 4599 struct slab *slab, void *head, void *tail, 4600 int cnt, unsigned long addr) 4601 { 4602 __slab_free(s, slab, head, tail, cnt, addr); 4603 } 4604 #endif /* CONFIG_SLUB_TINY */ 4605 4606 static __fastpath_inline 4607 void slab_free(struct kmem_cache *s, struct slab *slab, void *object, 4608 unsigned long addr) 4609 { 4610 memcg_slab_free_hook(s, slab, &object, 1); 4611 alloc_tagging_slab_free_hook(s, slab, &object, 1); 4612 4613 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false))) 4614 do_slab_free(s, slab, object, object, 1, addr); 4615 } 4616 4617 #ifdef CONFIG_MEMCG 4618 /* Do not inline the rare memcg charging failure path into the allocation path */ 4619 static noinline 4620 void memcg_alloc_abort_single(struct kmem_cache *s, void *object) 4621 { 4622 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false))) 4623 do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_); 4624 } 4625 #endif 4626 4627 static __fastpath_inline 4628 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, 4629 void *tail, void **p, int cnt, unsigned long addr) 4630 { 4631 memcg_slab_free_hook(s, slab, p, cnt); 4632 alloc_tagging_slab_free_hook(s, slab, p, cnt); 4633 /* 4634 * With KASAN enabled, slab_free_freelist_hook() modifies the freelist 4635 * to remove objects whose reuse must be delayed.
4636 */ 4637 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) 4638 do_slab_free(s, slab, head, tail, cnt, addr); 4639 } 4640 4641 #ifdef CONFIG_SLUB_RCU_DEBUG 4642 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head) 4643 { 4644 struct rcu_delayed_free *delayed_free = 4645 container_of(rcu_head, struct rcu_delayed_free, head); 4646 void *object = delayed_free->object; 4647 struct slab *slab = virt_to_slab(object); 4648 struct kmem_cache *s; 4649 4650 kfree(delayed_free); 4651 4652 if (WARN_ON(is_kfence_address(object))) 4653 return; 4654 4655 /* find the object and the cache again */ 4656 if (WARN_ON(!slab)) 4657 return; 4658 s = slab->slab_cache; 4659 if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU))) 4660 return; 4661 4662 /* resume freeing */ 4663 if (slab_free_hook(s, object, slab_want_init_on_free(s), true)) 4664 do_slab_free(s, slab, object, object, 1, _THIS_IP_); 4665 } 4666 #endif /* CONFIG_SLUB_RCU_DEBUG */ 4667 4668 #ifdef CONFIG_KASAN_GENERIC 4669 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 4670 { 4671 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr); 4672 } 4673 #endif 4674 4675 static inline struct kmem_cache *virt_to_cache(const void *obj) 4676 { 4677 struct slab *slab; 4678 4679 slab = virt_to_slab(obj); 4680 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) 4681 return NULL; 4682 return slab->slab_cache; 4683 } 4684 4685 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) 4686 { 4687 struct kmem_cache *cachep; 4688 4689 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && 4690 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) 4691 return s; 4692 4693 cachep = virt_to_cache(x); 4694 if (WARN(cachep && cachep != s, 4695 "%s: Wrong slab cache. %s but object is from %s\n", 4696 __func__, s->name, cachep->name)) 4697 print_tracking(cachep, x); 4698 return cachep; 4699 } 4700 4701 /** 4702 * kmem_cache_free - Deallocate an object 4703 * @s: The cache the allocation was from. 4704 * @x: The previously allocated object. 4705 * 4706 * Free an object which was previously allocated from this 4707 * cache. 4708 */ 4709 void kmem_cache_free(struct kmem_cache *s, void *x) 4710 { 4711 s = cache_from_obj(s, x); 4712 if (!s) 4713 return; 4714 trace_kmem_cache_free(_RET_IP_, x, s); 4715 slab_free(s, virt_to_slab(x), x, _RET_IP_); 4716 } 4717 EXPORT_SYMBOL(kmem_cache_free); 4718 4719 static void free_large_kmalloc(struct folio *folio, void *object) 4720 { 4721 unsigned int order = folio_order(folio); 4722 4723 if (WARN_ON_ONCE(order == 0)) 4724 pr_warn_once("object pointer: 0x%p\n", object); 4725 4726 kmemleak_free(object); 4727 kasan_kfree_large(object); 4728 kmsan_kfree_large(object); 4729 4730 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4731 -(PAGE_SIZE << order)); 4732 folio_put(folio); 4733 } 4734 4735 /** 4736 * kfree - free previously allocated memory 4737 * @object: pointer returned by kmalloc() or kmem_cache_alloc() 4738 * 4739 * If @object is NULL, no operation is performed. 
4740 */ 4741 void kfree(const void *object) 4742 { 4743 struct folio *folio; 4744 struct slab *slab; 4745 struct kmem_cache *s; 4746 void *x = (void *)object; 4747 4748 trace_kfree(_RET_IP_, object); 4749 4750 if (unlikely(ZERO_OR_NULL_PTR(object))) 4751 return; 4752 4753 folio = virt_to_folio(object); 4754 if (unlikely(!folio_test_slab(folio))) { 4755 free_large_kmalloc(folio, (void *)object); 4756 return; 4757 } 4758 4759 slab = folio_slab(folio); 4760 s = slab->slab_cache; 4761 slab_free(s, slab, x, _RET_IP_); 4762 } 4763 EXPORT_SYMBOL(kfree); 4764 4765 static __always_inline __realloc_size(2) void * 4766 __do_krealloc(const void *p, size_t new_size, gfp_t flags) 4767 { 4768 void *ret; 4769 size_t ks = 0; 4770 int orig_size = 0; 4771 struct kmem_cache *s = NULL; 4772 4773 if (unlikely(ZERO_OR_NULL_PTR(p))) 4774 goto alloc_new; 4775 4776 /* Check for double-free. */ 4777 if (!kasan_check_byte(p)) 4778 return NULL; 4779 4780 if (is_kfence_address(p)) { 4781 ks = orig_size = kfence_ksize(p); 4782 } else { 4783 struct folio *folio; 4784 4785 folio = virt_to_folio(p); 4786 if (unlikely(!folio_test_slab(folio))) { 4787 /* Big kmalloc object */ 4788 WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE); 4789 WARN_ON(p != folio_address(folio)); 4790 ks = folio_size(folio); 4791 } else { 4792 s = folio_slab(folio)->slab_cache; 4793 orig_size = get_orig_size(s, (void *)p); 4794 ks = s->object_size; 4795 } 4796 } 4797 4798 /* If the old object doesn't fit, allocate a bigger one */ 4799 if (new_size > ks) 4800 goto alloc_new; 4801 4802 /* Zero out spare memory. */ 4803 if (want_init_on_alloc(flags)) { 4804 kasan_disable_current(); 4805 if (orig_size && orig_size < new_size) 4806 memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size); 4807 else 4808 memset(kasan_reset_tag(p) + new_size, 0, ks - new_size); 4809 kasan_enable_current(); 4810 } 4811 4812 /* Setup kmalloc redzone when needed */ 4813 if (s && slub_debug_orig_size(s)) { 4814 set_orig_size(s, (void *)p, new_size); 4815 if (s->flags & SLAB_RED_ZONE && new_size < ks) 4816 memset_no_sanitize_memory(kasan_reset_tag(p) + new_size, 4817 SLUB_RED_ACTIVE, ks - new_size); 4818 } 4819 4820 p = kasan_krealloc(p, new_size, flags); 4821 return (void *)p; 4822 4823 alloc_new: 4824 ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_); 4825 if (ret && p) { 4826 /* Disable KASAN checks as the object's redzone is accessed. */ 4827 kasan_disable_current(); 4828 memcpy(ret, kasan_reset_tag(p), orig_size ?: ks); 4829 kasan_enable_current(); 4830 } 4831 4832 return ret; 4833 } 4834 4835 /** 4836 * krealloc - reallocate memory. The contents will remain unchanged. 4837 * @p: object to reallocate memory for. 4838 * @new_size: how many bytes of memory are required. 4839 * @flags: the type of memory to allocate. 4840 * 4841 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size 4842 * is 0 and @p is not a %NULL pointer, the object pointed to is freed. 4843 * 4844 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the 4845 * initial memory allocation, every subsequent call to this API for the same 4846 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that 4847 * __GFP_ZERO is not fully honored by this API. 
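* For example (editorial note): with a 64-byte bucket and no orig_size tracking, krealloc(p, 40, GFP_KERNEL | __GFP_ZERO) keeps bytes 0-39 and zeroes bytes 40-63, per the diagram below; with orig_size tracking, growing a 40-byte allocation to 60 bytes zeroes exactly bytes 40-59.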
4848 * 4849 * When slub_debug_orig_size() is off, krealloc() only knows about the bucket 4850 * size of an allocation (but not the exact size it was allocated with) and 4851 * hence implements the following semantics for shrinking and growing buffers 4852 * with __GFP_ZERO. 4853 * 4854 * new bucket 4855 * 0 size size 4856 * |--------|----------------| 4857 * | keep | zero | 4858 * 4859 * Otherwise, the original allocation size 'orig_size' could be used to 4860 * precisely clear the requested size, and the new size will also be stored 4861 * as the new 'orig_size'. 4862 * 4863 * In any case, the contents of the object pointed to are preserved up to the 4864 * lesser of the new and old sizes. 4865 * 4866 * Return: pointer to the allocated memory or %NULL in case of error 4867 */ 4868 void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags) 4869 { 4870 void *ret; 4871 4872 if (unlikely(!new_size)) { 4873 kfree(p); 4874 return ZERO_SIZE_PTR; 4875 } 4876 4877 ret = __do_krealloc(p, new_size, flags); 4878 if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret)) 4879 kfree(p); 4880 4881 return ret; 4882 } 4883 EXPORT_SYMBOL(krealloc_noprof); 4884 4885 struct detached_freelist { 4886 struct slab *slab; 4887 void *tail; 4888 void *freelist; 4889 int cnt; 4890 struct kmem_cache *s; 4891 }; 4892 4893 /* 4894 * This function progressively scans the array with free objects (with 4895 * a limited look ahead) and extracts objects belonging to the same 4896 * slab. It builds a detached freelist directly within the given 4897 * slab/objects. This can happen without any need for 4898 * synchronization, because the objects are owned by the running process. 4899 * The freelist is built up as a singly linked list in the objects. 4900 * The idea is that this detached freelist can then be bulk 4901 * transferred to the real freelist(s), while only requiring a single 4902 * synchronization primitive. Look ahead in the array is limited for 4903 * performance reasons.
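* For example (editorial note): bulk-freeing eight objects that came from two slabs typically takes two passes here and two freelist updates, rather than eight independent cmpxchg operations.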
4904 */ 4905 static inline 4906 int build_detached_freelist(struct kmem_cache *s, size_t size, 4907 void **p, struct detached_freelist *df) 4908 { 4909 int lookahead = 3; 4910 void *object; 4911 struct folio *folio; 4912 size_t same; 4913 4914 object = p[--size]; 4915 folio = virt_to_folio(object); 4916 if (!s) { 4917 /* Handle kmalloc'ed objects */ 4918 if (unlikely(!folio_test_slab(folio))) { 4919 free_large_kmalloc(folio, object); 4920 df->slab = NULL; 4921 return size; 4922 } 4923 /* Derive kmem_cache from object */ 4924 df->slab = folio_slab(folio); 4925 df->s = df->slab->slab_cache; 4926 } else { 4927 df->slab = folio_slab(folio); 4928 df->s = cache_from_obj(s, object); /* Support for memcg */ 4929 } 4930 4931 /* Start new detached freelist */ 4932 df->tail = object; 4933 df->freelist = object; 4934 df->cnt = 1; 4935 4936 if (is_kfence_address(object)) 4937 return size; 4938 4939 set_freepointer(df->s, object, NULL); 4940 4941 same = size; 4942 while (size) { 4943 object = p[--size]; 4944 /* df->slab is always set at this point */ 4945 if (df->slab == virt_to_slab(object)) { 4946 /* Opportunistically build the freelist */ 4947 set_freepointer(df->s, object, df->freelist); 4948 df->freelist = object; 4949 df->cnt++; 4950 same--; 4951 if (size != same) 4952 swap(p[size], p[same]); 4953 continue; 4954 } 4955 4956 /* Limit look ahead search */ 4957 if (!--lookahead) 4958 break; 4959 } 4960 4961 return same; 4962 } 4963 4964 /* 4965 * Internal bulk free of objects that were not initialized by the post alloc 4966 * hooks and thus should not be processed by the free hooks. 4967 */ 4968 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4969 { 4970 if (!size) 4971 return; 4972 4973 do { 4974 struct detached_freelist df; 4975 4976 size = build_detached_freelist(s, size, p, &df); 4977 if (!df.slab) 4978 continue; 4979 4980 if (kfence_free(df.freelist)) 4981 continue; 4982 4983 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, 4984 _RET_IP_); 4985 } while (likely(size)); 4986 } 4987 4988 /* Note that interrupts must be enabled when calling this function. */ 4989 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4990 { 4991 if (!size) 4992 return; 4993 4994 do { 4995 struct detached_freelist df; 4996 4997 size = build_detached_freelist(s, size, p, &df); 4998 if (!df.slab) 4999 continue; 5000 5001 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size], 5002 df.cnt, _RET_IP_); 5003 } while (likely(size)); 5004 } 5005 EXPORT_SYMBOL(kmem_cache_free_bulk); 5006 5007 #ifndef CONFIG_SLUB_TINY 5008 static inline 5009 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 5010 void **p) 5011 { 5012 struct kmem_cache_cpu *c; 5013 unsigned long irqflags; 5014 int i; 5015 5016 /* 5017 * Drain objects in the per cpu slab while disabling local 5018 * IRQs, which protects against preemption and interrupt 5019 * handlers invoking the normal fastpath. 5020 */ 5021 c = slub_get_cpu_ptr(s->cpu_slab); 5022 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 5023 5024 for (i = 0; i < size; i++) { 5025 void *object = kfence_alloc(s, s->object_size, flags); 5026 5027 if (unlikely(object)) { 5028 p[i] = object; 5029 continue; 5030 } 5031 5032 object = c->freelist; 5033 if (unlikely(!object)) { 5034 /* 5035 * We may have removed an object from c->freelist using 5036 * the fastpath in the previous iteration; in that case, 5037 * c->tid has not been bumped yet.
5038 * Since ___slab_alloc() may reenable interrupts while 5039 * allocating memory, we should bump c->tid now. 5040 */ 5041 c->tid = next_tid(c->tid); 5042 5043 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 5044 5045 /* 5046 * Invoking the slow path likely has the side effect 5047 * of re-populating the per CPU c->freelist 5048 */ 5049 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 5050 _RET_IP_, c, s->object_size); 5051 if (unlikely(!p[i])) 5052 goto error; 5053 5054 c = this_cpu_ptr(s->cpu_slab); 5055 maybe_wipe_obj_freeptr(s, p[i]); 5056 5057 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 5058 5059 continue; /* goto for-loop */ 5060 } 5061 c->freelist = get_freepointer(s, object); 5062 p[i] = object; 5063 maybe_wipe_obj_freeptr(s, p[i]); 5064 stat(s, ALLOC_FASTPATH); 5065 } 5066 c->tid = next_tid(c->tid); 5067 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 5068 slub_put_cpu_ptr(s->cpu_slab); 5069 5070 return i; 5071 5072 error: 5073 slub_put_cpu_ptr(s->cpu_slab); 5074 __kmem_cache_free_bulk(s, i, p); 5075 return 0; 5076 5077 } 5078 #else /* CONFIG_SLUB_TINY */ 5079 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 5080 size_t size, void **p) 5081 { 5082 int i; 5083 5084 for (i = 0; i < size; i++) { 5085 void *object = kfence_alloc(s, s->object_size, flags); 5086 5087 if (unlikely(object)) { 5088 p[i] = object; 5089 continue; 5090 } 5091 5092 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, 5093 _RET_IP_, s->object_size); 5094 if (unlikely(!p[i])) 5095 goto error; 5096 5097 maybe_wipe_obj_freeptr(s, p[i]); 5098 } 5099 5100 return i; 5101 5102 error: 5103 __kmem_cache_free_bulk(s, i, p); 5104 return 0; 5105 } 5106 #endif /* CONFIG_SLUB_TINY */ 5107 5108 /* Note that interrupts must be enabled when calling this function. */ 5109 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, 5110 void **p) 5111 { 5112 int i; 5113 5114 if (!size) 5115 return 0; 5116 5117 s = slab_pre_alloc_hook(s, flags); 5118 if (unlikely(!s)) 5119 return 0; 5120 5121 i = __kmem_cache_alloc_bulk(s, flags, size, p); 5122 if (unlikely(i == 0)) 5123 return 0; 5124 5125 /* 5126 * memcg and kmem_cache debug support and memory initialization. 5127 * Done outside of the IRQ disabled fastpath loop. 5128 */ 5129 if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p, 5130 slab_want_init_on_alloc(flags, s), s->object_size))) { 5131 return 0; 5132 } 5133 return i; 5134 } 5135 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof); 5136 5137 5138 /* 5139 * Object placement in a slab is made very easy because we always start at 5140 * offset 0. If we tune the size of the object to the alignment then we can 5141 * get the required alignment by putting one properly sized object after 5142 * another. 5143 * 5144 * Notice that the allocation order determines the sizes of the per cpu 5145 * caches. Each processor always has one slab available for allocations. 5146 * Increasing the allocation order reduces the number of times that slabs 5147 * must be moved on and off the partial lists and is therefore a factor in 5148 * locking overhead. 5149 */ 5150 5151 /* 5152 * Minimum / Maximum order of slab pages. This influences locking overhead 5153 * and slab fragmentation. A higher order reduces the number of partial slabs 5154 * and increases the number of allocations possible without having to 5155 * take the list_lock. 5156 */ 5157 static unsigned int slub_min_order; 5158 static unsigned int slub_max_order = 5159 IS_ENABLED(CONFIG_SLUB_TINY) ?
1 : PAGE_ALLOC_COSTLY_ORDER; 5160 static unsigned int slub_min_objects; 5161 5162 /* 5163 * Calculate the order of allocation given a slab object size. 5164 * 5165 * The order of allocation has significant impact on performance and other 5166 * system components. Generally order 0 allocations should be preferred since 5167 * order 0 does not cause fragmentation in the page allocator. Larger objects 5168 * can be problematic to put into order 0 slabs because there may be too much 5169 * unused space left. We go to a higher order if more than 1/16th of the slab 5170 * would be wasted. 5171 * 5172 * In order to reach satisfactory performance we must ensure that a minimum 5173 * number of objects is in one slab. Otherwise we may generate too much 5174 * activity on the partial lists which requires taking the list_lock. This is 5175 * less a concern for large slabs though which are rarely used. 5176 * 5177 * slab_max_order specifies the order at which we stop considering the 5178 * number of objects in a slab as critical. If we reach slab_max_order then 5179 * we try to keep the page order as low as possible. So we accept more wasted 5180 * space in favor of a small page order. 5181 * 5182 * Higher order allocations also allow the placement of more objects in a 5183 * slab and thereby reduce object handling overhead. If the user has 5184 * requested a higher minimum order then we start with that one instead of 5185 * the smallest order which will fit the object. 5186 */ 5187 static inline unsigned int calc_slab_order(unsigned int size, 5188 unsigned int min_order, unsigned int max_order, 5189 unsigned int fract_leftover) 5190 { 5191 unsigned int order; 5192 5193 for (order = min_order; order <= max_order; order++) { 5194 5195 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 5196 unsigned int rem; 5197 5198 rem = slab_size % size; 5199 5200 if (rem <= slab_size / fract_leftover) 5201 break; 5202 } 5203 5204 return order; 5205 } 5206 5207 static inline int calculate_order(unsigned int size) 5208 { 5209 unsigned int order; 5210 unsigned int min_objects; 5211 unsigned int max_objects; 5212 unsigned int min_order; 5213 5214 min_objects = slub_min_objects; 5215 if (!min_objects) { 5216 /* 5217 * Some architectures will only update present cpus when 5218 * onlining them, so don't trust the number if it's just 1. But 5219 * we also don't want to use nr_cpu_ids always, as on some other 5220 * architectures, there can be many possible cpus, but never 5221 * onlined. Here we compromise between trying to avoid too high 5222 * an order on systems that appear larger than they are, and too 5223 * low an order on systems that appear smaller than they are. 5224 */ 5225 unsigned int nr_cpus = num_present_cpus(); 5226 if (nr_cpus <= 1) 5227 nr_cpus = nr_cpu_ids; 5228 min_objects = 4 * (fls(nr_cpus) + 1); 5229 } 5230 /* min_objects can't be 0 because get_order(0) is undefined */ 5231 max_objects = max(order_objects(slub_max_order, size), 1U); 5232 min_objects = min(min_objects, max_objects); 5233 5234 min_order = max_t(unsigned int, slub_min_order, 5235 get_order(min_objects * size)); 5236 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 5237 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 5238 5239 /* 5240 * Attempt to find the best configuration for a slab. This works by first 5241 * attempting to generate a layout with the best possible configuration 5242 * and backing off gradually.
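* * Worked example (editorial note): with 4K pages and size = 720, order 0 leaves 4096 % 720 = 496 wasted bytes, more than 4096/16 = 256, while order 1 leaves 8192 % 720 = 272, below 8192/16 = 512, so the first pass settles on order 1.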
5243 * 5244 * We start with accepting at most 1/16 waste and try to find the 5245 * smallest order from min_objects-derived/slab_min_order up to 5246 * slab_max_order that will satisfy the constraint. Note that increasing 5247 * the order can only result in the same or less fractional waste, not more. 5248 * 5249 * If that fails, we increase the acceptable fraction of waste and try 5250 * again. The last iteration with fraction of 1/2 would effectively 5251 * accept any waste and give us the order determined by min_objects, as 5252 * long as at least a single object fits within slab_max_order. 5253 */ 5254 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) { 5255 order = calc_slab_order(size, min_order, slub_max_order, 5256 fraction); 5257 if (order <= slub_max_order) 5258 return order; 5259 } 5260 5261 /* 5262 * Doh, this slab cannot be placed using slab_max_order. 5263 */ 5264 order = get_order(size); 5265 if (order <= MAX_PAGE_ORDER) 5266 return order; 5267 return -ENOSYS; 5268 } 5269 5270 static void 5271 init_kmem_cache_node(struct kmem_cache_node *n) 5272 { 5273 n->nr_partial = 0; 5274 spin_lock_init(&n->list_lock); 5275 INIT_LIST_HEAD(&n->partial); 5276 #ifdef CONFIG_SLUB_DEBUG 5277 atomic_long_set(&n->nr_slabs, 0); 5278 atomic_long_set(&n->total_objects, 0); 5279 INIT_LIST_HEAD(&n->full); 5280 #endif 5281 } 5282 5283 #ifndef CONFIG_SLUB_TINY 5284 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 5285 { 5286 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 5287 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * 5288 sizeof(struct kmem_cache_cpu)); 5289 5290 /* 5291 * Must align to double word boundary for the double cmpxchg 5292 * instructions to work; see __pcpu_double_call_return_bool(). 5293 */ 5294 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 5295 2 * sizeof(void *)); 5296 5297 if (!s->cpu_slab) 5298 return 0; 5299 5300 init_kmem_cache_cpus(s); 5301 5302 return 1; 5303 } 5304 #else 5305 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 5306 { 5307 return 1; 5308 } 5309 #endif /* CONFIG_SLUB_TINY */ 5310 5311 static struct kmem_cache *kmem_cache_node; 5312 5313 /* 5314 * No kmalloc_node yet so do it by hand. We know that this is the first 5315 * slab on the node for this slabcache. There are no concurrent accesses 5316 * possible. 5317 * 5318 * Note that this function only works on the kmem_cache_node 5319 * when allocating for the kmem_cache_node. This is used for bootstrapping 5320 * memory on a fresh node that has no slab structures yet.
5321 */ 5322 static void early_kmem_cache_node_alloc(int node) 5323 { 5324 struct slab *slab; 5325 struct kmem_cache_node *n; 5326 5327 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 5328 5329 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 5330 5331 BUG_ON(!slab); 5332 if (slab_nid(slab) != node) { 5333 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 5334 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 5335 } 5336 5337 n = slab->freelist; 5338 BUG_ON(!n); 5339 #ifdef CONFIG_SLUB_DEBUG 5340 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 5341 #endif 5342 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 5343 slab->freelist = get_freepointer(kmem_cache_node, n); 5344 slab->inuse = 1; 5345 kmem_cache_node->node[node] = n; 5346 init_kmem_cache_node(n); 5347 inc_slabs_node(kmem_cache_node, node, slab->objects); 5348 5349 /* 5350 * No locks need to be taken here as it has just been 5351 * initialized and there is no concurrent access. 5352 */ 5353 __add_partial(n, slab, DEACTIVATE_TO_HEAD); 5354 } 5355 5356 static void free_kmem_cache_nodes(struct kmem_cache *s) 5357 { 5358 int node; 5359 struct kmem_cache_node *n; 5360 5361 for_each_kmem_cache_node(s, node, n) { 5362 s->node[node] = NULL; 5363 kmem_cache_free(kmem_cache_node, n); 5364 } 5365 } 5366 5367 void __kmem_cache_release(struct kmem_cache *s) 5368 { 5369 cache_random_seq_destroy(s); 5370 #ifndef CONFIG_SLUB_TINY 5371 free_percpu(s->cpu_slab); 5372 #endif 5373 free_kmem_cache_nodes(s); 5374 } 5375 5376 static int init_kmem_cache_nodes(struct kmem_cache *s) 5377 { 5378 int node; 5379 5380 for_each_node_mask(node, slab_nodes) { 5381 struct kmem_cache_node *n; 5382 5383 if (slab_state == DOWN) { 5384 early_kmem_cache_node_alloc(node); 5385 continue; 5386 } 5387 n = kmem_cache_alloc_node(kmem_cache_node, 5388 GFP_KERNEL, node); 5389 5390 if (!n) { 5391 free_kmem_cache_nodes(s); 5392 return 0; 5393 } 5394 5395 init_kmem_cache_node(n); 5396 s->node[node] = n; 5397 } 5398 return 1; 5399 } 5400 5401 static void set_cpu_partial(struct kmem_cache *s) 5402 { 5403 #ifdef CONFIG_SLUB_CPU_PARTIAL 5404 unsigned int nr_objects; 5405 5406 /* 5407 * cpu_partial determines the maximum number of objects kept in the 5408 * per cpu partial lists of a processor. 5409 * 5410 * Per cpu partial lists mainly contain slabs that just have one 5411 * object freed. If they are used for allocation then they can be 5412 * filled up again with minimal effort. The slab will never hit the 5413 * per node partial lists and therefore no locking will be required. 5414 * 5415 * For backwards compatibility reasons, this is determined as a number 5416 * of objects, even though we now limit the maximum number of pages, see 5417 * slub_set_cpu_partial() 5418 */ 5419 if (!kmem_cache_has_cpu_partial(s)) 5420 nr_objects = 0; 5421 else if (s->size >= PAGE_SIZE) 5422 nr_objects = 6; 5423 else if (s->size >= 1024) 5424 nr_objects = 24; 5425 else if (s->size >= 256) 5426 nr_objects = 52; 5427 else 5428 nr_objects = 120; 5429 5430 slub_set_cpu_partial(s, nr_objects); 5431 #endif 5432 } 5433 5434 /* 5435 * calculate_sizes() determines the order and the distribution of data within 5436 * a slab object. 5437 */ 5438 static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s) 5439 { 5440 slab_flags_t flags = s->flags; 5441 unsigned int size = s->object_size; 5442 unsigned int order; 5443 5444 /* 5445 * Round up object size to the next word boundary.
We can only 5446 * place the free pointer at word boundaries and this determines 5447 * the possible location of the free pointer. 5448 */ 5449 size = ALIGN(size, sizeof(void *)); 5450 5451 #ifdef CONFIG_SLUB_DEBUG 5452 /* 5453 * Determine if we can poison the object itself. If the user of 5454 * the slab may touch the object after free or before allocation 5455 * then we should never poison the object itself. 5456 */ 5457 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 5458 !s->ctor) 5459 s->flags |= __OBJECT_POISON; 5460 else 5461 s->flags &= ~__OBJECT_POISON; 5462 5463 5464 /* 5465 * If we are Redzoning then check if there is some space between the 5466 * end of the object and the free pointer. If not then add an 5467 * additional word to have some bytes to store Redzone information. 5468 */ 5469 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 5470 size += sizeof(void *); 5471 #endif 5472 5473 /* 5474 * With that we have determined the number of bytes in actual use 5475 * by the object and redzoning. 5476 */ 5477 s->inuse = size; 5478 5479 if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) || 5480 (flags & SLAB_POISON) || s->ctor || 5481 ((flags & SLAB_RED_ZONE) && 5482 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) { 5483 /* 5484 * Relocate free pointer after the object if it is not 5485 * permitted to overwrite the first word of the object on 5486 * kmem_cache_free. 5487 * 5488 * This is the case if we do RCU, have a constructor or 5489 * destructor, are poisoning the objects, or are 5490 * redzoning an object smaller than sizeof(void *) or are 5491 * redzoning an object with slub_debug_orig_size() enabled, 5492 * in which case the right redzone may be extended. 5493 * 5494 * The assumption that s->offset >= s->inuse means free 5495 * pointer is outside of the object is used in the 5496 * freeptr_outside_object() function. If that is no 5497 * longer true, the function needs to be modified. 5498 */ 5499 s->offset = size; 5500 size += sizeof(void *); 5501 } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) { 5502 s->offset = args->freeptr_offset; 5503 } else { 5504 /* 5505 * Store freelist pointer near middle of object to keep 5506 * it away from the edges of the object to avoid small 5507 * sized over/underflows from neighboring allocations. 5508 */ 5509 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 5510 } 5511 5512 #ifdef CONFIG_SLUB_DEBUG 5513 if (flags & SLAB_STORE_USER) { 5514 /* 5515 * Need to store information about allocs and frees after 5516 * the object. 5517 */ 5518 size += 2 * sizeof(struct track); 5519 5520 /* Save the original kmalloc request size */ 5521 if (flags & SLAB_KMALLOC) 5522 size += sizeof(unsigned int); 5523 } 5524 #endif 5525 5526 kasan_cache_create(s, &size, &s->flags); 5527 #ifdef CONFIG_SLUB_DEBUG 5528 if (flags & SLAB_RED_ZONE) { 5529 /* 5530 * Add some empty padding so that we can catch 5531 * overwrites from earlier objects rather than let 5532 * tracking information or the free pointer be 5533 * corrupted if a user writes before the start 5534 * of the object. 5535 */ 5536 size += sizeof(void *); 5537 5538 s->red_left_pad = sizeof(void *); 5539 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 5540 size += s->red_left_pad; 5541 } 5542 #endif 5543 5544 /* 5545 * SLUB stores one object immediately after another beginning from 5546 * offset 0. In order to align the objects we have to simply size 5547 * each object to conform to the alignment. 
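* * For example (editorial note): a 52-byte object with s->align == 8 is sized up to 56 bytes, so object N begins at byte offset N * 56.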
5548 */ 5549 size = ALIGN(size, s->align); 5550 s->size = size; 5551 s->reciprocal_size = reciprocal_value(size); 5552 order = calculate_order(size); 5553 5554 if ((int)order < 0) 5555 return 0; 5556 5557 s->allocflags = __GFP_COMP; 5558 5559 if (s->flags & SLAB_CACHE_DMA) 5560 s->allocflags |= GFP_DMA; 5561 5562 if (s->flags & SLAB_CACHE_DMA32) 5563 s->allocflags |= GFP_DMA32; 5564 5565 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5566 s->allocflags |= __GFP_RECLAIMABLE; 5567 5568 /* 5569 * Determine the number of objects per slab 5570 */ 5571 s->oo = oo_make(order, size); 5572 s->min = oo_make(get_order(size), size); 5573 5574 return !!oo_objects(s->oo); 5575 } 5576 5577 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, 5578 const char *text) 5579 { 5580 #ifdef CONFIG_SLUB_DEBUG 5581 void *addr = slab_address(slab); 5582 void *p; 5583 5584 slab_err(s, slab, text, s->name); 5585 5586 spin_lock(&object_map_lock); 5587 __fill_map(object_map, s, slab); 5588 5589 for_each_object(p, s, addr, slab->objects) { 5590 5591 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { 5592 if (slab_add_kunit_errors()) 5593 continue; 5594 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 5595 print_tracking(s, p); 5596 } 5597 } 5598 spin_unlock(&object_map_lock); 5599 #endif 5600 } 5601 5602 /* 5603 * Attempt to free all partial slabs on a node. 5604 * This is called from __kmem_cache_shutdown(). We must take the list_lock 5605 * because sysfs files might still access the partial list after the shutdown 5606 * has started. 5607 */ 5608 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 5609 { 5610 LIST_HEAD(discard); 5611 struct slab *slab, *h; 5612 5613 BUG_ON(irqs_disabled()); 5614 spin_lock_irq(&n->list_lock); 5615 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 5616 if (!slab->inuse) { 5617 remove_partial(n, slab); 5618 list_add(&slab->slab_list, &discard); 5619 } else { 5620 list_slab_objects(s, slab, 5621 "Objects remaining in %s on __kmem_cache_shutdown()"); 5622 } 5623 } 5624 spin_unlock_irq(&n->list_lock); 5625 5626 list_for_each_entry_safe(slab, h, &discard, slab_list) 5627 discard_slab(s, slab); 5628 } 5629 5630 bool __kmem_cache_empty(struct kmem_cache *s) 5631 { 5632 int node; 5633 struct kmem_cache_node *n; 5634 5635 for_each_kmem_cache_node(s, node, n) 5636 if (n->nr_partial || node_nr_slabs(n)) 5637 return false; 5638 return true; 5639 } 5640 5641 /* 5642 * Release all resources used by a slab cache.
5642 */ 5643 int __kmem_cache_shutdown(struct kmem_cache *s) 5644 { 5645 int node; 5646 struct kmem_cache_node *n; 5647 5648 flush_all_cpus_locked(s); 5649 /* Attempt to free all objects */ 5650 for_each_kmem_cache_node(s, node, n) { 5651 free_partial(s, n); 5652 if (n->nr_partial || node_nr_slabs(n)) 5653 return 1; 5654 } 5655 return 0; 5656 } 5657 5658 #ifdef CONFIG_PRINTK 5659 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 5660 { 5661 void *base; 5662 int __maybe_unused i; 5663 unsigned int objnr; 5664 void *objp; 5665 void *objp0; 5666 struct kmem_cache *s = slab->slab_cache; 5667 struct track __maybe_unused *trackp; 5668 5669 kpp->kp_ptr = object; 5670 kpp->kp_slab = slab; 5671 kpp->kp_slab_cache = s; 5672 base = slab_address(slab); 5673 objp0 = kasan_reset_tag(object); 5674 #ifdef CONFIG_SLUB_DEBUG 5675 objp = restore_red_left(s, objp0); 5676 #else 5677 objp = objp0; 5678 #endif 5679 objnr = obj_to_index(s, slab, objp); 5680 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 5681 objp = base + s->size * objnr; 5682 kpp->kp_objp = objp; 5683 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 5684 || (objp - base) % s->size) || 5685 !(s->flags & SLAB_STORE_USER)) 5686 return; 5687 #ifdef CONFIG_SLUB_DEBUG 5688 objp = fixup_red_left(s, objp); 5689 trackp = get_track(s, objp, TRACK_ALLOC); 5690 kpp->kp_ret = (void *)trackp->addr; 5691 #ifdef CONFIG_STACKDEPOT 5692 { 5693 depot_stack_handle_t handle; 5694 unsigned long *entries; 5695 unsigned int nr_entries; 5696 5697 handle = READ_ONCE(trackp->handle); 5698 if (handle) { 5699 nr_entries = stack_depot_fetch(handle, &entries); 5700 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5701 kpp->kp_stack[i] = (void *)entries[i]; 5702 } 5703 5704 trackp = get_track(s, objp, TRACK_FREE); 5705 handle = READ_ONCE(trackp->handle); 5706 if (handle) { 5707 nr_entries = stack_depot_fetch(handle, &entries); 5708 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5709 kpp->kp_free_stack[i] = (void *)entries[i]; 5710 } 5711 } 5712 #endif 5713 #endif 5714 } 5715 #endif 5716 5717 /******************************************************************** 5718 * Kmalloc subsystem 5719 *******************************************************************/ 5720 5721 static int __init setup_slub_min_order(char *str) 5722 { 5723 get_option(&str, (int *)&slub_min_order); 5724 5725 if (slub_min_order > slub_max_order) 5726 slub_max_order = slub_min_order; 5727 5728 return 1; 5729 } 5730 5731 __setup("slab_min_order=", setup_slub_min_order); 5732 __setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0); 5733 5734 5735 static int __init setup_slub_max_order(char *str) 5736 { 5737 get_option(&str, (int *)&slub_max_order); 5738 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER); 5739 5740 if (slub_min_order > slub_max_order) 5741 slub_min_order = slub_max_order; 5742 5743 return 1; 5744 } 5745 5746 __setup("slab_max_order=", setup_slub_max_order); 5747 __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0); 5748 5749 static int __init setup_slub_min_objects(char *str) 5750 { 5751 get_option(&str, (int *)&slub_min_objects); 5752 5753 return 1; 5754 } 5755 5756 __setup("slab_min_objects=", setup_slub_min_objects); 5757 __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0); 5758 5759 #ifdef CONFIG_NUMA 5760 static int __init setup_slab_strict_numa(char *str) 5761 { 5762 if (nr_node_ids > 1) { 5763 
static_branch_enable(&strict_numa); 5764 pr_info("SLUB: Strict NUMA enabled.\n"); 5765 } else { 5766 pr_warn("slab_strict_numa parameter set on non-NUMA system.\n"); 5767 } 5768 5769 return 1; 5770 } 5771 5772 __setup("slab_strict_numa", setup_slab_strict_numa); 5773 #endif 5774 5775 5776 #ifdef CONFIG_HARDENED_USERCOPY 5777 /* 5778 * Rejects incorrectly sized objects and objects that are to be copied 5779 * to/from userspace but do not fall entirely within the containing slab 5780 * cache's usercopy region. 5781 * 5782 * Aborts via usercopy_abort() if a check fails; returns normally 5783 * when the copy is permitted. 5784 */ 5785 void __check_heap_object(const void *ptr, unsigned long n, 5786 const struct slab *slab, bool to_user) 5787 { 5788 struct kmem_cache *s; 5789 unsigned int offset; 5790 bool is_kfence = is_kfence_address(ptr); 5791 5792 ptr = kasan_reset_tag(ptr); 5793 5794 /* Find object and usable object size. */ 5795 s = slab->slab_cache; 5796 5797 /* Reject impossible pointers. */ 5798 if (ptr < slab_address(slab)) 5799 usercopy_abort("SLUB object not in SLUB page?!", NULL, 5800 to_user, 0, n); 5801 5802 /* Find offset within object. */ 5803 if (is_kfence) 5804 offset = ptr - kfence_object_start(ptr); 5805 else 5806 offset = (ptr - slab_address(slab)) % s->size; 5807 5808 /* Adjust for redzone and reject if within the redzone. */ 5809 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 5810 if (offset < s->red_left_pad) 5811 usercopy_abort("SLUB object in left red zone", 5812 s->name, to_user, offset, n); 5813 offset -= s->red_left_pad; 5814 } 5815 5816 /* Allow address range falling entirely within usercopy region. */ 5817 if (offset >= s->useroffset && 5818 offset - s->useroffset <= s->usersize && 5819 n <= s->useroffset - offset + s->usersize) 5820 return; 5821 5822 usercopy_abort("SLUB object", s->name, to_user, offset, n); 5823 } 5824 #endif /* CONFIG_HARDENED_USERCOPY */ 5825 5826 #define SHRINK_PROMOTE_MAX 32 5827 5828 /* 5829 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 5830 * up most to the head of the partial lists. New allocations will then 5831 * fill those up and thus they can be removed from the partial lists. 5832 * 5833 * The slabs with the least items are placed last. This results in them 5834 * being allocated from last, increasing the chance that the last objects 5835 * are freed in them. 5836 */ 5837 static int __kmem_cache_do_shrink(struct kmem_cache *s) 5838 { 5839 int node; 5840 int i; 5841 struct kmem_cache_node *n; 5842 struct slab *slab; 5843 struct slab *t; 5844 struct list_head discard; 5845 struct list_head promote[SHRINK_PROMOTE_MAX]; 5846 unsigned long flags; 5847 int ret = 0; 5848 5849 for_each_kmem_cache_node(s, node, n) { 5850 INIT_LIST_HEAD(&discard); 5851 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 5852 INIT_LIST_HEAD(promote + i); 5853 5854 spin_lock_irqsave(&n->list_lock, flags); 5855 5856 /* 5857 * Build lists of slabs to discard or promote. 5858 * 5859 * Note that concurrent frees may occur while we hold the 5860 * list_lock. slab->inuse here is the upper limit.
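* * (Editorial example: a slab with slab->objects == 30 and inuse == 27 has free == 3 and is queued on promote[2]; a completely free slab is moved to the discard list instead.)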
5861 */ 5862 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 5863 int free = slab->objects - slab->inuse; 5864 5865 /* Do not reread slab->inuse */ 5866 barrier(); 5867 5868 /* We do not keep full slabs on the list */ 5869 BUG_ON(free <= 0); 5870 5871 if (free == slab->objects) { 5872 list_move(&slab->slab_list, &discard); 5873 slab_clear_node_partial(slab); 5874 n->nr_partial--; 5875 dec_slabs_node(s, node, slab->objects); 5876 } else if (free <= SHRINK_PROMOTE_MAX) 5877 list_move(&slab->slab_list, promote + free - 1); 5878 } 5879 5880 /* 5881 * Promote the slabs filled up most to the head of the 5882 * partial list. 5883 */ 5884 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 5885 list_splice(promote + i, &n->partial); 5886 5887 spin_unlock_irqrestore(&n->list_lock, flags); 5888 5889 /* Release empty slabs */ 5890 list_for_each_entry_safe(slab, t, &discard, slab_list) 5891 free_slab(s, slab); 5892 5893 if (node_nr_slabs(n)) 5894 ret = 1; 5895 } 5896 5897 return ret; 5898 } 5899 5900 int __kmem_cache_shrink(struct kmem_cache *s) 5901 { 5902 flush_all(s); 5903 return __kmem_cache_do_shrink(s); 5904 } 5905 5906 static int slab_mem_going_offline_callback(void *arg) 5907 { 5908 struct kmem_cache *s; 5909 5910 mutex_lock(&slab_mutex); 5911 list_for_each_entry(s, &slab_caches, list) { 5912 flush_all_cpus_locked(s); 5913 __kmem_cache_do_shrink(s); 5914 } 5915 mutex_unlock(&slab_mutex); 5916 5917 return 0; 5918 } 5919 5920 static void slab_mem_offline_callback(void *arg) 5921 { 5922 struct memory_notify *marg = arg; 5923 int offline_node; 5924 5925 offline_node = marg->status_change_nid_normal; 5926 5927 /* 5928 * If the node still has available memory, we still need its 5929 * kmem_cache_node, so there is nothing to do here. 5930 */ 5931 if (offline_node < 0) 5932 return; 5933 5934 mutex_lock(&slab_mutex); 5935 node_clear(offline_node, slab_nodes); 5936 /* 5937 * We no longer free kmem_cache_node structures here, as it would be 5938 * racy with all get_node() users, and infeasible to protect them with 5939 * slab_mutex. 5940 */ 5941 mutex_unlock(&slab_mutex); 5942 } 5943 5944 static int slab_mem_going_online_callback(void *arg) 5945 { 5946 struct kmem_cache_node *n; 5947 struct kmem_cache *s; 5948 struct memory_notify *marg = arg; 5949 int nid = marg->status_change_nid_normal; 5950 int ret = 0; 5951 5952 /* 5953 * If the node's memory is already available, then kmem_cache_node is 5954 * already created. Nothing to do. 5955 */ 5956 if (nid < 0) 5957 return 0; 5958 5959 /* 5960 * We are bringing a node online. No memory is available yet. We must 5961 * allocate a kmem_cache_node structure in order to bring the node 5962 * online. 5963 */ 5964 mutex_lock(&slab_mutex); 5965 list_for_each_entry(s, &slab_caches, list) { 5966 /* 5967 * The structure may already exist if the node was previously 5968 * onlined and offlined. 5969 */ 5970 if (get_node(s, nid)) 5971 continue; 5972 /* 5973 * XXX: kmem_cache_alloc_node will fall back to other nodes 5974 * since memory is not yet available from the node that 5975 * is brought up. 5976 */ 5977 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 5978 if (!n) { 5979 ret = -ENOMEM; 5980 goto out; 5981 } 5982 init_kmem_cache_node(n); 5983 s->node[nid] = n; 5984 } 5985 /* 5986 * Any cache created after this point will also have kmem_cache_node 5987 * initialized for the new node.
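 * (Cache creation calls init_kmem_cache_nodes(), which walks the
 * slab_nodes mask updated below.)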
5988 */ 5989 node_set(nid, slab_nodes); 5990 out: 5991 mutex_unlock(&slab_mutex); 5992 return ret; 5993 } 5994 5995 static int slab_memory_callback(struct notifier_block *self, 5996 unsigned long action, void *arg) 5997 { 5998 int ret = 0; 5999 6000 switch (action) { 6001 case MEM_GOING_ONLINE: 6002 ret = slab_mem_going_online_callback(arg); 6003 break; 6004 case MEM_GOING_OFFLINE: 6005 ret = slab_mem_going_offline_callback(arg); 6006 break; 6007 case MEM_OFFLINE: 6008 case MEM_CANCEL_ONLINE: 6009 slab_mem_offline_callback(arg); 6010 break; 6011 case MEM_ONLINE: 6012 case MEM_CANCEL_OFFLINE: 6013 break; 6014 } 6015 if (ret) 6016 ret = notifier_from_errno(ret); 6017 else 6018 ret = NOTIFY_OK; 6019 return ret; 6020 } 6021 6022 /******************************************************************** 6023 * Basic setup of slabs 6024 *******************************************************************/ 6025 6026 /* 6027 * Used for early kmem_cache structures that were allocated using 6028 * the page allocator. Allocate them properly, then fix up the pointers 6029 * that may be pointing to the wrong kmem_cache structure. 6030 */ 6031 6032 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 6033 { 6034 int node; 6035 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 6036 struct kmem_cache_node *n; 6037 6038 memcpy(s, static_cache, kmem_cache->object_size); 6039 6040 /* 6041 * This runs very early, and only the boot processor is supposed to be 6042 * up. Even if that were not the case, IRQs are not up yet, so we 6043 * could not send IPIs anyway. 6044 */ 6045 __flush_cpu_slab(s, smp_processor_id()); 6046 for_each_kmem_cache_node(s, node, n) { 6047 struct slab *p; 6048 6049 list_for_each_entry(p, &n->partial, slab_list) 6050 p->slab_cache = s; 6051 6052 #ifdef CONFIG_SLUB_DEBUG 6053 list_for_each_entry(p, &n->full, slab_list) 6054 p->slab_cache = s; 6055 #endif 6056 } 6057 list_add(&s->list, &slab_caches); 6058 return s; 6059 } 6060 6061 void __init kmem_cache_init(void) 6062 { 6063 static __initdata struct kmem_cache boot_kmem_cache, 6064 boot_kmem_cache_node; 6065 int node; 6066 6067 if (debug_guardpage_minorder()) 6068 slub_max_order = 0; 6069 6070 /* Print slub debugging pointers without hashing */ 6071 if (__slub_debug_enabled()) 6072 no_hash_pointers_enable(NULL); 6073 6074 kmem_cache_node = &boot_kmem_cache_node; 6075 kmem_cache = &boot_kmem_cache; 6076 6077 /* 6078 * Initialize the nodemask for which we will allocate per node 6079 * structures. There is no need to take slab_mutex here yet.
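 * This is early boot with a single running CPU, so nothing can race
 * with the nodemask update.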
6080 */ 6081 for_each_node_state(node, N_NORMAL_MEMORY) 6082 node_set(node, slab_nodes); 6083 6084 create_boot_cache(kmem_cache_node, "kmem_cache_node", 6085 sizeof(struct kmem_cache_node), 6086 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 6087 6088 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 6089 6090 /* Able to allocate the per node structures */ 6091 slab_state = PARTIAL; 6092 6093 create_boot_cache(kmem_cache, "kmem_cache", 6094 offsetof(struct kmem_cache, node) + 6095 nr_node_ids * sizeof(struct kmem_cache_node *), 6096 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 6097 6098 kmem_cache = bootstrap(&boot_kmem_cache); 6099 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 6100 6101 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 6102 setup_kmalloc_cache_index_table(); 6103 create_kmalloc_caches(); 6104 6105 /* Setup random freelists for each cache */ 6106 init_freelist_randomization(); 6107 6108 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 6109 slub_cpu_dead); 6110 6111 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 6112 cache_line_size(), 6113 slub_min_order, slub_max_order, slub_min_objects, 6114 nr_cpu_ids, nr_node_ids); 6115 } 6116 6117 void __init kmem_cache_init_late(void) 6118 { 6119 #ifndef CONFIG_SLUB_TINY 6120 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); 6121 WARN_ON(!flushwq); 6122 #endif 6123 } 6124 6125 struct kmem_cache * 6126 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 6127 slab_flags_t flags, void (*ctor)(void *)) 6128 { 6129 struct kmem_cache *s; 6130 6131 s = find_mergeable(size, align, flags, name, ctor); 6132 if (s) { 6133 if (sysfs_slab_alias(s, name)) 6134 pr_err("SLUB: Unable to add cache alias %s to sysfs\n", 6135 name); 6136 6137 s->refcount++; 6138 6139 /* 6140 * Adjust the object sizes so that we clear 6141 * the complete object on kzalloc. 6142 */ 6143 s->object_size = max(s->object_size, size); 6144 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 6145 } 6146 6147 return s; 6148 } 6149 6150 int do_kmem_cache_create(struct kmem_cache *s, const char *name, 6151 unsigned int size, struct kmem_cache_args *args, 6152 slab_flags_t flags) 6153 { 6154 int err = -EINVAL; 6155 6156 s->name = name; 6157 s->size = s->object_size = size; 6158 6159 s->flags = kmem_cache_flags(flags, s->name); 6160 #ifdef CONFIG_SLAB_FREELIST_HARDENED 6161 s->random = get_random_long(); 6162 #endif 6163 s->align = args->align; 6164 s->ctor = args->ctor; 6165 #ifdef CONFIG_HARDENED_USERCOPY 6166 s->useroffset = args->useroffset; 6167 s->usersize = args->usersize; 6168 #endif 6169 6170 if (!calculate_sizes(args, s)) 6171 goto out; 6172 if (disable_higher_order_debug) { 6173 /* 6174 * Disable debugging flags that store metadata if the min slab 6175 * order increased. 6176 */ 6177 if (get_order(s->size) > get_order(s->object_size)) { 6178 s->flags &= ~DEBUG_METADATA_FLAGS; 6179 s->offset = 0; 6180 if (!calculate_sizes(args, s)) 6181 goto out; 6182 } 6183 } 6184 6185 #ifdef system_has_freelist_aba 6186 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { 6187 /* Enable fast mode */ 6188 s->flags |= __CMPXCHG_DOUBLE; 6189 } 6190 #endif 6191 6192 /* 6193 * The larger the object size is, the more slabs we want on the partial 6194 * list to avoid pounding the page allocator excessively. 
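 * For example (illustrative numbers): a cache with s->size == 4096
 * yields ilog2(4096) / 2 == 6, which the min_t()/max_t() pair below
 * clamps into the [MIN_PARTIAL, MAX_PARTIAL] range.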
6195 */ 6196 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 6197 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 6198 6199 set_cpu_partial(s); 6200 6201 #ifdef CONFIG_NUMA 6202 s->remote_node_defrag_ratio = 1000; 6203 #endif 6204 6205 /* Initialize the pre-computed randomized freelist if slab is up */ 6206 if (slab_state >= UP) { 6207 if (init_cache_random_seq(s)) 6208 goto out; 6209 } 6210 6211 if (!init_kmem_cache_nodes(s)) 6212 goto out; 6213 6214 if (!alloc_kmem_cache_cpus(s)) 6215 goto out; 6216 6217 err = 0; 6218 6219 /* Mutex is not taken during early boot */ 6220 if (slab_state <= UP) 6221 goto out; 6222 6223 /* 6224 * Failing to create sysfs files is not critical to SLUB functionality. 6225 * If it fails, proceed with cache creation without these files. 6226 */ 6227 if (sysfs_slab_add(s)) 6228 pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name); 6229 6230 if (s->flags & SLAB_STORE_USER) 6231 debugfs_slab_add(s); 6232 6233 out: 6234 if (err) 6235 __kmem_cache_release(s); 6236 return err; 6237 } 6238 6239 #ifdef SLAB_SUPPORTS_SYSFS 6240 static int count_inuse(struct slab *slab) 6241 { 6242 return slab->inuse; 6243 } 6244 6245 static int count_total(struct slab *slab) 6246 { 6247 return slab->objects; 6248 } 6249 #endif 6250 6251 #ifdef CONFIG_SLUB_DEBUG 6252 static void validate_slab(struct kmem_cache *s, struct slab *slab, 6253 unsigned long *obj_map) 6254 { 6255 void *p; 6256 void *addr = slab_address(slab); 6257 6258 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 6259 return; 6260 6261 /* Now we know that a valid freelist exists */ 6262 __fill_map(obj_map, s, slab); 6263 for_each_object(p, s, addr, slab->objects) { 6264 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 6265 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 6266 6267 if (!check_object(s, slab, p, val)) 6268 break; 6269 } 6270 } 6271 6272 static int validate_slab_node(struct kmem_cache *s, 6273 struct kmem_cache_node *n, unsigned long *obj_map) 6274 { 6275 unsigned long count = 0; 6276 struct slab *slab; 6277 unsigned long flags; 6278 6279 spin_lock_irqsave(&n->list_lock, flags); 6280 6281 list_for_each_entry(slab, &n->partial, slab_list) { 6282 validate_slab(s, slab, obj_map); 6283 count++; 6284 } 6285 if (count != n->nr_partial) { 6286 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 6287 s->name, count, n->nr_partial); 6288 slab_add_kunit_errors(); 6289 } 6290 6291 if (!(s->flags & SLAB_STORE_USER)) 6292 goto out; 6293 6294 list_for_each_entry(slab, &n->full, slab_list) { 6295 validate_slab(s, slab, obj_map); 6296 count++; 6297 } 6298 if (count != node_nr_slabs(n)) { 6299 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 6300 s->name, count, node_nr_slabs(n)); 6301 slab_add_kunit_errors(); 6302 } 6303 6304 out: 6305 spin_unlock_irqrestore(&n->list_lock, flags); 6306 return count; 6307 } 6308 6309 long validate_slab_cache(struct kmem_cache *s) 6310 { 6311 int node; 6312 unsigned long count = 0; 6313 struct kmem_cache_node *n; 6314 unsigned long *obj_map; 6315 6316 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 6317 if (!obj_map) 6318 return -ENOMEM; 6319 6320 flush_all(s); 6321 for_each_kmem_cache_node(s, node, n) 6322 count += validate_slab_node(s, n, obj_map); 6323 6324 bitmap_free(obj_map); 6325 6326 return count; 6327 } 6328 EXPORT_SYMBOL(validate_slab_cache); 6329 6330 #ifdef CONFIG_DEBUG_FS 6331 /* 6332 * Generate lists of code addresses where slabcache objects are allocated 6333 * and freed. 
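 * The resulting tables are exported via debugfs as "alloc_traces"
 * and "free_traces" (see slab_debug_trace_open() below).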
6334 */ 6335 6336 struct location { 6337 depot_stack_handle_t handle; 6338 unsigned long count; 6339 unsigned long addr; 6340 unsigned long waste; 6341 long long sum_time; 6342 long min_time; 6343 long max_time; 6344 long min_pid; 6345 long max_pid; 6346 DECLARE_BITMAP(cpus, NR_CPUS); 6347 nodemask_t nodes; 6348 }; 6349 6350 struct loc_track { 6351 unsigned long max; 6352 unsigned long count; 6353 struct location *loc; 6354 loff_t idx; 6355 }; 6356 6357 static struct dentry *slab_debugfs_root; 6358 6359 static void free_loc_track(struct loc_track *t) 6360 { 6361 if (t->max) 6362 free_pages((unsigned long)t->loc, 6363 get_order(sizeof(struct location) * t->max)); 6364 } 6365 6366 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 6367 { 6368 struct location *l; 6369 int order; 6370 6371 order = get_order(sizeof(struct location) * max); 6372 6373 l = (void *)__get_free_pages(flags, order); 6374 if (!l) 6375 return 0; 6376 6377 if (t->count) { 6378 memcpy(l, t->loc, sizeof(struct location) * t->count); 6379 free_loc_track(t); 6380 } 6381 t->max = max; 6382 t->loc = l; 6383 return 1; 6384 } 6385 6386 static int add_location(struct loc_track *t, struct kmem_cache *s, 6387 const struct track *track, 6388 unsigned int orig_size) 6389 { 6390 long start, end, pos; 6391 struct location *l; 6392 unsigned long caddr, chandle, cwaste; 6393 unsigned long age = jiffies - track->when; 6394 depot_stack_handle_t handle = 0; 6395 unsigned int waste = s->object_size - orig_size; 6396 6397 #ifdef CONFIG_STACKDEPOT 6398 handle = READ_ONCE(track->handle); 6399 #endif 6400 start = -1; 6401 end = t->count; 6402 6403 for ( ; ; ) { 6404 pos = start + (end - start + 1) / 2; 6405 6406 /* 6407 * There is nothing at "end". If we end up there, 6408 * we need to insert before "end". 6409 */ 6410 if (pos == end) 6411 break; 6412 6413 l = &t->loc[pos]; 6414 caddr = l->addr; 6415 chandle = l->handle; 6416 cwaste = l->waste; 6417 if ((track->addr == caddr) && (handle == chandle) && 6418 (waste == cwaste)) { 6419 6420 l->count++; 6421 if (track->when) { 6422 l->sum_time += age; 6423 if (age < l->min_time) 6424 l->min_time = age; 6425 if (age > l->max_time) 6426 l->max_time = age; 6427 6428 if (track->pid < l->min_pid) 6429 l->min_pid = track->pid; 6430 if (track->pid > l->max_pid) 6431 l->max_pid = track->pid; 6432 6433 cpumask_set_cpu(track->cpu, 6434 to_cpumask(l->cpus)); 6435 } 6436 node_set(page_to_nid(virt_to_page(track)), l->nodes); 6437 return 1; 6438 } 6439 6440 if (track->addr < caddr) 6441 end = pos; 6442 else if (track->addr == caddr && handle < chandle) 6443 end = pos; 6444 else if (track->addr == caddr && handle == chandle && 6445 waste < cwaste) 6446 end = pos; 6447 else 6448 start = pos; 6449 } 6450 6451 /* 6452 * Not found. Insert new tracking element.
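 * At this point pos is the insertion point that keeps t->loc sorted
 * by (addr, handle, waste).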
6453 */ 6454 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 6455 return 0; 6456 6457 l = t->loc + pos; 6458 if (pos < t->count) 6459 memmove(l + 1, l, 6460 (t->count - pos) * sizeof(struct location)); 6461 t->count++; 6462 l->count = 1; 6463 l->addr = track->addr; 6464 l->sum_time = age; 6465 l->min_time = age; 6466 l->max_time = age; 6467 l->min_pid = track->pid; 6468 l->max_pid = track->pid; 6469 l->handle = handle; 6470 l->waste = waste; 6471 cpumask_clear(to_cpumask(l->cpus)); 6472 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 6473 nodes_clear(l->nodes); 6474 node_set(page_to_nid(virt_to_page(track)), l->nodes); 6475 return 1; 6476 } 6477 6478 static void process_slab(struct loc_track *t, struct kmem_cache *s, 6479 struct slab *slab, enum track_item alloc, 6480 unsigned long *obj_map) 6481 { 6482 void *addr = slab_address(slab); 6483 bool is_alloc = (alloc == TRACK_ALLOC); 6484 void *p; 6485 6486 __fill_map(obj_map, s, slab); 6487 6488 for_each_object(p, s, addr, slab->objects) 6489 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 6490 add_location(t, s, get_track(s, p, alloc), 6491 is_alloc ? get_orig_size(s, p) : 6492 s->object_size); 6493 } 6494 #endif /* CONFIG_DEBUG_FS */ 6495 #endif /* CONFIG_SLUB_DEBUG */ 6496 6497 #ifdef SLAB_SUPPORTS_SYSFS 6498 enum slab_stat_type { 6499 SL_ALL, /* All slabs */ 6500 SL_PARTIAL, /* Only partially allocated slabs */ 6501 SL_CPU, /* Only slabs used for cpu caches */ 6502 SL_OBJECTS, /* Determine allocated objects not slabs */ 6503 SL_TOTAL /* Determine object capacity not slabs */ 6504 }; 6505 6506 #define SO_ALL (1 << SL_ALL) 6507 #define SO_PARTIAL (1 << SL_PARTIAL) 6508 #define SO_CPU (1 << SL_CPU) 6509 #define SO_OBJECTS (1 << SL_OBJECTS) 6510 #define SO_TOTAL (1 << SL_TOTAL) 6511 6512 static ssize_t show_slab_objects(struct kmem_cache *s, 6513 char *buf, unsigned long flags) 6514 { 6515 unsigned long total = 0; 6516 int node; 6517 int x; 6518 unsigned long *nodes; 6519 int len = 0; 6520 6521 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 6522 if (!nodes) 6523 return -ENOMEM; 6524 6525 if (flags & SO_CPU) { 6526 int cpu; 6527 6528 for_each_possible_cpu(cpu) { 6529 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 6530 cpu); 6531 int node; 6532 struct slab *slab; 6533 6534 slab = READ_ONCE(c->slab); 6535 if (!slab) 6536 continue; 6537 6538 node = slab_nid(slab); 6539 if (flags & SO_TOTAL) 6540 x = slab->objects; 6541 else if (flags & SO_OBJECTS) 6542 x = slab->inuse; 6543 else 6544 x = 1; 6545 6546 total += x; 6547 nodes[node] += x; 6548 6549 #ifdef CONFIG_SLUB_CPU_PARTIAL 6550 slab = slub_percpu_partial_read_once(c); 6551 if (slab) { 6552 node = slab_nid(slab); 6553 if (flags & SO_TOTAL) 6554 WARN_ON_ONCE(1); 6555 else if (flags & SO_OBJECTS) 6556 WARN_ON_ONCE(1); 6557 else 6558 x = data_race(slab->slabs); 6559 total += x; 6560 nodes[node] += x; 6561 } 6562 #endif 6563 } 6564 } 6565 6566 /* 6567 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 6568 * already held, as that would conflict with the existing lock order: 6569 * 6570 * mem_hotplug_lock->slab_mutex->kernfs_mutex 6571 * 6572 * We don't really need mem_hotplug_lock (to hold off 6573 * slab_mem_going_offline_callback) here because slab's memory hot 6574 * unplug code doesn't destroy the kmem_cache->node[] data.
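 * The node[] entries themselves are never freed once created (see
 * slab_mem_offline_callback() above), so walking them here is safe.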
6575 */ 6576 6577 #ifdef CONFIG_SLUB_DEBUG 6578 if (flags & SO_ALL) { 6579 struct kmem_cache_node *n; 6580 6581 for_each_kmem_cache_node(s, node, n) { 6582 6583 if (flags & SO_TOTAL) 6584 x = node_nr_objs(n); 6585 else if (flags & SO_OBJECTS) 6586 x = node_nr_objs(n) - count_partial(n, count_free); 6587 else 6588 x = node_nr_slabs(n); 6589 total += x; 6590 nodes[node] += x; 6591 } 6592 6593 } else 6594 #endif 6595 if (flags & SO_PARTIAL) { 6596 struct kmem_cache_node *n; 6597 6598 for_each_kmem_cache_node(s, node, n) { 6599 if (flags & SO_TOTAL) 6600 x = count_partial(n, count_total); 6601 else if (flags & SO_OBJECTS) 6602 x = count_partial(n, count_inuse); 6603 else 6604 x = n->nr_partial; 6605 total += x; 6606 nodes[node] += x; 6607 } 6608 } 6609 6610 len += sysfs_emit_at(buf, len, "%lu", total); 6611 #ifdef CONFIG_NUMA 6612 for (node = 0; node < nr_node_ids; node++) { 6613 if (nodes[node]) 6614 len += sysfs_emit_at(buf, len, " N%d=%lu", 6615 node, nodes[node]); 6616 } 6617 #endif 6618 len += sysfs_emit_at(buf, len, "\n"); 6619 kfree(nodes); 6620 6621 return len; 6622 } 6623 6624 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 6625 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 6626 6627 struct slab_attribute { 6628 struct attribute attr; 6629 ssize_t (*show)(struct kmem_cache *s, char *buf); 6630 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 6631 }; 6632 6633 #define SLAB_ATTR_RO(_name) \ 6634 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 6635 6636 #define SLAB_ATTR(_name) \ 6637 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 6638 6639 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 6640 { 6641 return sysfs_emit(buf, "%u\n", s->size); 6642 } 6643 SLAB_ATTR_RO(slab_size); 6644 6645 static ssize_t align_show(struct kmem_cache *s, char *buf) 6646 { 6647 return sysfs_emit(buf, "%u\n", s->align); 6648 } 6649 SLAB_ATTR_RO(align); 6650 6651 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 6652 { 6653 return sysfs_emit(buf, "%u\n", s->object_size); 6654 } 6655 SLAB_ATTR_RO(object_size); 6656 6657 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 6658 { 6659 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 6660 } 6661 SLAB_ATTR_RO(objs_per_slab); 6662 6663 static ssize_t order_show(struct kmem_cache *s, char *buf) 6664 { 6665 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 6666 } 6667 SLAB_ATTR_RO(order); 6668 6669 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 6670 { 6671 return sysfs_emit(buf, "%lu\n", s->min_partial); 6672 } 6673 6674 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 6675 size_t length) 6676 { 6677 unsigned long min; 6678 int err; 6679 6680 err = kstrtoul(buf, 10, &min); 6681 if (err) 6682 return err; 6683 6684 s->min_partial = min; 6685 return length; 6686 } 6687 SLAB_ATTR(min_partial); 6688 6689 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 6690 { 6691 unsigned int nr_partial = 0; 6692 #ifdef CONFIG_SLUB_CPU_PARTIAL 6693 nr_partial = s->cpu_partial; 6694 #endif 6695 6696 return sysfs_emit(buf, "%u\n", nr_partial); 6697 } 6698 6699 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 6700 size_t length) 6701 { 6702 unsigned int objects; 6703 int err; 6704 6705 err = kstrtouint(buf, 10, &objects); 6706 if (err) 6707 return err; 6708 if (objects && !kmem_cache_has_cpu_partial(s)) 6709 return -EINVAL; 6710 6711 slub_set_cpu_partial(s, objects); 
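	/*
	 * Flush so that slabs already cached on the percpu partial lists
	 * are released and the new limit takes effect from a clean state.
	 */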
6712 flush_all(s); 6713 return length; 6714 } 6715 SLAB_ATTR(cpu_partial); 6716 6717 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 6718 { 6719 if (!s->ctor) 6720 return 0; 6721 return sysfs_emit(buf, "%pS\n", s->ctor); 6722 } 6723 SLAB_ATTR_RO(ctor); 6724 6725 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 6726 { 6727 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 6728 } 6729 SLAB_ATTR_RO(aliases); 6730 6731 static ssize_t partial_show(struct kmem_cache *s, char *buf) 6732 { 6733 return show_slab_objects(s, buf, SO_PARTIAL); 6734 } 6735 SLAB_ATTR_RO(partial); 6736 6737 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 6738 { 6739 return show_slab_objects(s, buf, SO_CPU); 6740 } 6741 SLAB_ATTR_RO(cpu_slabs); 6742 6743 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 6744 { 6745 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 6746 } 6747 SLAB_ATTR_RO(objects_partial); 6748 6749 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 6750 { 6751 int objects = 0; 6752 int slabs = 0; 6753 int cpu __maybe_unused; 6754 int len = 0; 6755 6756 #ifdef CONFIG_SLUB_CPU_PARTIAL 6757 for_each_online_cpu(cpu) { 6758 struct slab *slab; 6759 6760 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6761 6762 if (slab) 6763 slabs += data_race(slab->slabs); 6764 } 6765 #endif 6766 6767 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 6768 objects = (slabs * oo_objects(s->oo)) / 2; 6769 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 6770 6771 #ifdef CONFIG_SLUB_CPU_PARTIAL 6772 for_each_online_cpu(cpu) { 6773 struct slab *slab; 6774 6775 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6776 if (slab) { 6777 slabs = data_race(slab->slabs); 6778 objects = (slabs * oo_objects(s->oo)) / 2; 6779 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 6780 cpu, objects, slabs); 6781 } 6782 } 6783 #endif 6784 len += sysfs_emit_at(buf, len, "\n"); 6785 6786 return len; 6787 } 6788 SLAB_ATTR_RO(slabs_cpu_partial); 6789 6790 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 6791 { 6792 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 6793 } 6794 SLAB_ATTR_RO(reclaim_account); 6795 6796 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 6797 { 6798 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 6799 } 6800 SLAB_ATTR_RO(hwcache_align); 6801 6802 #ifdef CONFIG_ZONE_DMA 6803 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 6804 { 6805 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 6806 } 6807 SLAB_ATTR_RO(cache_dma); 6808 #endif 6809 6810 #ifdef CONFIG_HARDENED_USERCOPY 6811 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 6812 { 6813 return sysfs_emit(buf, "%u\n", s->usersize); 6814 } 6815 SLAB_ATTR_RO(usersize); 6816 #endif 6817 6818 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 6819 { 6820 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 6821 } 6822 SLAB_ATTR_RO(destroy_by_rcu); 6823 6824 #ifdef CONFIG_SLUB_DEBUG 6825 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 6826 { 6827 return show_slab_objects(s, buf, SO_ALL); 6828 } 6829 SLAB_ATTR_RO(slabs); 6830 6831 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 6832 { 6833 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 6834 } 6835 SLAB_ATTR_RO(total_objects); 6836 6837 static ssize_t objects_show(struct kmem_cache *s, char *buf) 6838 { 6839 return 
show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 6840 } 6841 SLAB_ATTR_RO(objects); 6842 6843 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 6844 { 6845 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 6846 } 6847 SLAB_ATTR_RO(sanity_checks); 6848 6849 static ssize_t trace_show(struct kmem_cache *s, char *buf) 6850 { 6851 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 6852 } 6853 SLAB_ATTR_RO(trace); 6854 6855 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 6856 { 6857 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 6858 } 6859 6860 SLAB_ATTR_RO(red_zone); 6861 6862 static ssize_t poison_show(struct kmem_cache *s, char *buf) 6863 { 6864 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 6865 } 6866 6867 SLAB_ATTR_RO(poison); 6868 6869 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 6870 { 6871 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 6872 } 6873 6874 SLAB_ATTR_RO(store_user); 6875 6876 static ssize_t validate_show(struct kmem_cache *s, char *buf) 6877 { 6878 return 0; 6879 } 6880 6881 static ssize_t validate_store(struct kmem_cache *s, 6882 const char *buf, size_t length) 6883 { 6884 int ret = -EINVAL; 6885 6886 if (buf[0] == '1' && kmem_cache_debug(s)) { 6887 ret = validate_slab_cache(s); 6888 if (ret >= 0) 6889 ret = length; 6890 } 6891 return ret; 6892 } 6893 SLAB_ATTR(validate); 6894 6895 #endif /* CONFIG_SLUB_DEBUG */ 6896 6897 #ifdef CONFIG_FAILSLAB 6898 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 6899 { 6900 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 6901 } 6902 6903 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 6904 size_t length) 6905 { 6906 if (s->refcount > 1) 6907 return -EINVAL; 6908 6909 if (buf[0] == '1') 6910 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); 6911 else 6912 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); 6913 6914 return length; 6915 } 6916 SLAB_ATTR(failslab); 6917 #endif 6918 6919 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 6920 { 6921 return 0; 6922 } 6923 6924 static ssize_t shrink_store(struct kmem_cache *s, 6925 const char *buf, size_t length) 6926 { 6927 if (buf[0] == '1') 6928 kmem_cache_shrink(s); 6929 else 6930 return -EINVAL; 6931 return length; 6932 } 6933 SLAB_ATTR(shrink); 6934 6935 #ifdef CONFIG_NUMA 6936 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 6937 { 6938 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 6939 } 6940 6941 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 6942 const char *buf, size_t length) 6943 { 6944 unsigned int ratio; 6945 int err; 6946 6947 err = kstrtouint(buf, 10, &ratio); 6948 if (err) 6949 return err; 6950 if (ratio > 100) 6951 return -ERANGE; 6952 6953 s->remote_node_defrag_ratio = ratio * 10; 6954 6955 return length; 6956 } 6957 SLAB_ATTR(remote_node_defrag_ratio); 6958 #endif 6959 6960 #ifdef CONFIG_SLUB_STATS 6961 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 6962 { 6963 unsigned long sum = 0; 6964 int cpu; 6965 int len = 0; 6966 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 6967 6968 if (!data) 6969 return -ENOMEM; 6970 6971 for_each_online_cpu(cpu) { 6972 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 6973 6974 data[cpu] = x; 6975 sum += x; 6976 } 6977 6978 len += sysfs_emit_at(buf, len, "%lu", sum); 6979 6980 #ifdef CONFIG_SMP 6981 for_each_online_cpu(cpu) { 6982 if (data[cpu]) 6983 len += 
sysfs_emit_at(buf, len, " C%d=%u", 6984 cpu, data[cpu]); 6985 } 6986 #endif 6987 kfree(data); 6988 len += sysfs_emit_at(buf, len, "\n"); 6989 6990 return len; 6991 } 6992 6993 static void clear_stat(struct kmem_cache *s, enum stat_item si) 6994 { 6995 int cpu; 6996 6997 for_each_online_cpu(cpu) 6998 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 6999 } 7000 7001 #define STAT_ATTR(si, text) \ 7002 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 7003 { \ 7004 return show_stat(s, buf, si); \ 7005 } \ 7006 static ssize_t text##_store(struct kmem_cache *s, \ 7007 const char *buf, size_t length) \ 7008 { \ 7009 if (buf[0] != '0') \ 7010 return -EINVAL; \ 7011 clear_stat(s, si); \ 7012 return length; \ 7013 } \ 7014 SLAB_ATTR(text); \ 7015 7016 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 7017 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 7018 STAT_ATTR(FREE_FASTPATH, free_fastpath); 7019 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 7020 STAT_ATTR(FREE_FROZEN, free_frozen); 7021 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 7022 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 7023 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 7024 STAT_ATTR(ALLOC_SLAB, alloc_slab); 7025 STAT_ATTR(ALLOC_REFILL, alloc_refill); 7026 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 7027 STAT_ATTR(FREE_SLAB, free_slab); 7028 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 7029 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 7030 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 7031 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 7032 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 7033 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 7034 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 7035 STAT_ATTR(ORDER_FALLBACK, order_fallback); 7036 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 7037 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 7038 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 7039 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 7040 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 7041 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 7042 #endif /* CONFIG_SLUB_STATS */ 7043 7044 #ifdef CONFIG_KFENCE 7045 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) 7046 { 7047 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); 7048 } 7049 7050 static ssize_t skip_kfence_store(struct kmem_cache *s, 7051 const char *buf, size_t length) 7052 { 7053 int ret = length; 7054 7055 if (buf[0] == '0') 7056 s->flags &= ~SLAB_SKIP_KFENCE; 7057 else if (buf[0] == '1') 7058 s->flags |= SLAB_SKIP_KFENCE; 7059 else 7060 ret = -EINVAL; 7061 7062 return ret; 7063 } 7064 SLAB_ATTR(skip_kfence); 7065 #endif 7066 7067 static struct attribute *slab_attrs[] = { 7068 &slab_size_attr.attr, 7069 &object_size_attr.attr, 7070 &objs_per_slab_attr.attr, 7071 &order_attr.attr, 7072 &min_partial_attr.attr, 7073 &cpu_partial_attr.attr, 7074 &objects_partial_attr.attr, 7075 &partial_attr.attr, 7076 &cpu_slabs_attr.attr, 7077 &ctor_attr.attr, 7078 &aliases_attr.attr, 7079 &align_attr.attr, 7080 &hwcache_align_attr.attr, 7081 &reclaim_account_attr.attr, 7082 &destroy_by_rcu_attr.attr, 7083 &shrink_attr.attr, 7084 &slabs_cpu_partial_attr.attr, 7085 #ifdef CONFIG_SLUB_DEBUG 7086 &total_objects_attr.attr, 7087 &objects_attr.attr, 7088 &slabs_attr.attr, 7089 &sanity_checks_attr.attr, 7090 &trace_attr.attr, 7091 &red_zone_attr.attr, 7092 &poison_attr.attr, 7093 &store_user_attr.attr, 7094 &validate_attr.attr, 7095 #endif 7096 #ifdef CONFIG_ZONE_DMA 7097 &cache_dma_attr.attr, 7098 #endif 
7099 #ifdef CONFIG_NUMA 7100 &remote_node_defrag_ratio_attr.attr, 7101 #endif 7102 #ifdef CONFIG_SLUB_STATS 7103 &alloc_fastpath_attr.attr, 7104 &alloc_slowpath_attr.attr, 7105 &free_fastpath_attr.attr, 7106 &free_slowpath_attr.attr, 7107 &free_frozen_attr.attr, 7108 &free_add_partial_attr.attr, 7109 &free_remove_partial_attr.attr, 7110 &alloc_from_partial_attr.attr, 7111 &alloc_slab_attr.attr, 7112 &alloc_refill_attr.attr, 7113 &alloc_node_mismatch_attr.attr, 7114 &free_slab_attr.attr, 7115 &cpuslab_flush_attr.attr, 7116 &deactivate_full_attr.attr, 7117 &deactivate_empty_attr.attr, 7118 &deactivate_to_head_attr.attr, 7119 &deactivate_to_tail_attr.attr, 7120 &deactivate_remote_frees_attr.attr, 7121 &deactivate_bypass_attr.attr, 7122 &order_fallback_attr.attr, 7123 &cmpxchg_double_fail_attr.attr, 7124 &cmpxchg_double_cpu_fail_attr.attr, 7125 &cpu_partial_alloc_attr.attr, 7126 &cpu_partial_free_attr.attr, 7127 &cpu_partial_node_attr.attr, 7128 &cpu_partial_drain_attr.attr, 7129 #endif 7130 #ifdef CONFIG_FAILSLAB 7131 &failslab_attr.attr, 7132 #endif 7133 #ifdef CONFIG_HARDENED_USERCOPY 7134 &usersize_attr.attr, 7135 #endif 7136 #ifdef CONFIG_KFENCE 7137 &skip_kfence_attr.attr, 7138 #endif 7139 7140 NULL 7141 }; 7142 7143 static const struct attribute_group slab_attr_group = { 7144 .attrs = slab_attrs, 7145 }; 7146 7147 static ssize_t slab_attr_show(struct kobject *kobj, 7148 struct attribute *attr, 7149 char *buf) 7150 { 7151 struct slab_attribute *attribute; 7152 struct kmem_cache *s; 7153 7154 attribute = to_slab_attr(attr); 7155 s = to_slab(kobj); 7156 7157 if (!attribute->show) 7158 return -EIO; 7159 7160 return attribute->show(s, buf); 7161 } 7162 7163 static ssize_t slab_attr_store(struct kobject *kobj, 7164 struct attribute *attr, 7165 const char *buf, size_t len) 7166 { 7167 struct slab_attribute *attribute; 7168 struct kmem_cache *s; 7169 7170 attribute = to_slab_attr(attr); 7171 s = to_slab(kobj); 7172 7173 if (!attribute->store) 7174 return -EIO; 7175 7176 return attribute->store(s, buf, len); 7177 } 7178 7179 static void kmem_cache_release(struct kobject *k) 7180 { 7181 slab_kmem_cache_release(to_slab(k)); 7182 } 7183 7184 static const struct sysfs_ops slab_sysfs_ops = { 7185 .show = slab_attr_show, 7186 .store = slab_attr_store, 7187 }; 7188 7189 static const struct kobj_type slab_ktype = { 7190 .sysfs_ops = &slab_sysfs_ops, 7191 .release = kmem_cache_release, 7192 }; 7193 7194 static struct kset *slab_kset; 7195 7196 static inline struct kset *cache_kset(struct kmem_cache *s) 7197 { 7198 return slab_kset; 7199 } 7200 7201 #define ID_STR_LENGTH 32 7202 7203 /* Create a unique string id for a slab cache: 7204 * 7205 * Format :[flags-]size 7206 */ 7207 static char *create_unique_id(struct kmem_cache *s) 7208 { 7209 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 7210 char *p = name; 7211 7212 if (!name) 7213 return ERR_PTR(-ENOMEM); 7214 7215 *p++ = ':'; 7216 /* 7217 * First flags affecting slabcache operations. We will only 7218 * get here for aliasable slabs so we do not need to support 7219 * too many flags. The flags here must cover all flags that 7220 * are matched during merging to guarantee that the id is 7221 * unique. 
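 * For example, a 192-byte SLAB_ACCOUNT cache yields ":A-0000192".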
7222 */ 7223 if (s->flags & SLAB_CACHE_DMA) 7224 *p++ = 'd'; 7225 if (s->flags & SLAB_CACHE_DMA32) 7226 *p++ = 'D'; 7227 if (s->flags & SLAB_RECLAIM_ACCOUNT) 7228 *p++ = 'a'; 7229 if (s->flags & SLAB_CONSISTENCY_CHECKS) 7230 *p++ = 'F'; 7231 if (s->flags & SLAB_ACCOUNT) 7232 *p++ = 'A'; 7233 if (p != name + 1) 7234 *p++ = '-'; 7235 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); 7236 7237 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { 7238 kfree(name); 7239 return ERR_PTR(-EINVAL); 7240 } 7241 kmsan_unpoison_memory(name, p - name); 7242 return name; 7243 } 7244 7245 static int sysfs_slab_add(struct kmem_cache *s) 7246 { 7247 int err; 7248 const char *name; 7249 struct kset *kset = cache_kset(s); 7250 int unmergeable = slab_unmergeable(s); 7251 7252 if (!unmergeable && disable_higher_order_debug && 7253 (slub_debug & DEBUG_METADATA_FLAGS)) 7254 unmergeable = 1; 7255 7256 if (unmergeable) { 7257 /* 7258 * Slabcache can never be merged so we can use the name proper. 7259 * This is typically the case for debug situations. In that 7260 * case we can catch duplicate names easily. 7261 */ 7262 sysfs_remove_link(&slab_kset->kobj, s->name); 7263 name = s->name; 7264 } else { 7265 /* 7266 * Create a unique name for the slab as a target 7267 * for the symlinks. 7268 */ 7269 name = create_unique_id(s); 7270 if (IS_ERR(name)) 7271 return PTR_ERR(name); 7272 } 7273 7274 s->kobj.kset = kset; 7275 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 7276 if (err) 7277 goto out; 7278 7279 err = sysfs_create_group(&s->kobj, &slab_attr_group); 7280 if (err) 7281 goto out_del_kobj; 7282 7283 if (!unmergeable) { 7284 /* Setup first alias */ 7285 sysfs_slab_alias(s, s->name); 7286 } 7287 out: 7288 if (!unmergeable) 7289 kfree(name); 7290 return err; 7291 out_del_kobj: 7292 kobject_del(&s->kobj); 7293 goto out; 7294 } 7295 7296 void sysfs_slab_unlink(struct kmem_cache *s) 7297 { 7298 if (s->kobj.state_in_sysfs) 7299 kobject_del(&s->kobj); 7300 } 7301 7302 void sysfs_slab_release(struct kmem_cache *s) 7303 { 7304 kobject_put(&s->kobj); 7305 } 7306 7307 /* 7308 * Need to buffer aliases during bootup until sysfs becomes 7309 * available lest we lose that information. 7310 */ 7311 struct saved_alias { 7312 struct kmem_cache *s; 7313 const char *name; 7314 struct saved_alias *next; 7315 }; 7316 7317 static struct saved_alias *alias_list; 7318 7319 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 7320 { 7321 struct saved_alias *al; 7322 7323 if (slab_state == FULL) { 7324 /* 7325 * If we have a leftover link then remove it. 7326 */ 7327 sysfs_remove_link(&slab_kset->kobj, name); 7328 /* 7329 * The original cache may have failed to generate sysfs file. 7330 * In that case, sysfs_create_link() returns -ENOENT and 7331 * symbolic link creation is skipped. 
7332 */ 7333 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 7334 } 7335 7336 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 7337 if (!al) 7338 return -ENOMEM; 7339 7340 al->s = s; 7341 al->name = name; 7342 al->next = alias_list; 7343 alias_list = al; 7344 kmsan_unpoison_memory(al, sizeof(*al)); 7345 return 0; 7346 } 7347 7348 static int __init slab_sysfs_init(void) 7349 { 7350 struct kmem_cache *s; 7351 int err; 7352 7353 mutex_lock(&slab_mutex); 7354 7355 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 7356 if (!slab_kset) { 7357 mutex_unlock(&slab_mutex); 7358 pr_err("Cannot register slab subsystem.\n"); 7359 return -ENOMEM; 7360 } 7361 7362 slab_state = FULL; 7363 7364 list_for_each_entry(s, &slab_caches, list) { 7365 err = sysfs_slab_add(s); 7366 if (err) 7367 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 7368 s->name); 7369 } 7370 7371 while (alias_list) { 7372 struct saved_alias *al = alias_list; 7373 7374 alias_list = alias_list->next; 7375 err = sysfs_slab_alias(al->s, al->name); 7376 if (err) 7377 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 7378 al->name); 7379 kfree(al); 7380 } 7381 7382 mutex_unlock(&slab_mutex); 7383 return 0; 7384 } 7385 late_initcall(slab_sysfs_init); 7386 #endif /* SLAB_SUPPORTS_SYSFS */ 7387 7388 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 7389 static int slab_debugfs_show(struct seq_file *seq, void *v) 7390 { 7391 struct loc_track *t = seq->private; 7392 struct location *l; 7393 unsigned long idx; 7394 7395 idx = (unsigned long) t->idx; 7396 if (idx < t->count) { 7397 l = &t->loc[idx]; 7398 7399 seq_printf(seq, "%7ld ", l->count); 7400 7401 if (l->addr) 7402 seq_printf(seq, "%pS", (void *)l->addr); 7403 else 7404 seq_puts(seq, "<not-available>"); 7405 7406 if (l->waste) 7407 seq_printf(seq, " waste=%lu/%lu", 7408 l->count * l->waste, l->waste); 7409 7410 if (l->sum_time != l->min_time) { 7411 seq_printf(seq, " age=%ld/%llu/%ld", 7412 l->min_time, div_u64(l->sum_time, l->count), 7413 l->max_time); 7414 } else 7415 seq_printf(seq, " age=%ld", l->min_time); 7416 7417 if (l->min_pid != l->max_pid) 7418 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 7419 else 7420 seq_printf(seq, " pid=%ld", 7421 l->min_pid); 7422 7423 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 7424 seq_printf(seq, " cpus=%*pbl", 7425 cpumask_pr_args(to_cpumask(l->cpus))); 7426 7427 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 7428 seq_printf(seq, " nodes=%*pbl", 7429 nodemask_pr_args(&l->nodes)); 7430 7431 #ifdef CONFIG_STACKDEPOT 7432 { 7433 depot_stack_handle_t handle; 7434 unsigned long *entries; 7435 unsigned int nr_entries, j; 7436 7437 handle = READ_ONCE(l->handle); 7438 if (handle) { 7439 nr_entries = stack_depot_fetch(handle, &entries); 7440 seq_puts(seq, "\n"); 7441 for (j = 0; j < nr_entries; j++) 7442 seq_printf(seq, " %pS\n", (void *)entries[j]); 7443 } 7444 } 7445 #endif 7446 seq_puts(seq, "\n"); 7447 } 7448 7449 if (!idx && !t->count) 7450 seq_puts(seq, "No data\n"); 7451 7452 return 0; 7453 } 7454 7455 static void slab_debugfs_stop(struct seq_file *seq, void *v) 7456 { 7457 } 7458 7459 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 7460 { 7461 struct loc_track *t = seq->private; 7462 7463 t->idx = ++(*ppos); 7464 if (*ppos <= t->count) 7465 return ppos; 7466 7467 return NULL; 7468 } 7469 7470 static int cmp_loc_by_count(const void *a, const void *b, const void *data) 7471 { 7472 struct location *loc1 = (struct location *)a; 7473 struct 
location *loc2 = (struct location *)b; 7474 7475 if (loc1->count > loc2->count) 7476 return -1; 7477 else 7478 return 1; 7479 } 7480 7481 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 7482 { 7483 struct loc_track *t = seq->private; 7484 7485 t->idx = *ppos; 7486 return ppos; 7487 } 7488 7489 static const struct seq_operations slab_debugfs_sops = { 7490 .start = slab_debugfs_start, 7491 .next = slab_debugfs_next, 7492 .stop = slab_debugfs_stop, 7493 .show = slab_debugfs_show, 7494 }; 7495 7496 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 7497 { 7498 7499 struct kmem_cache_node *n; 7500 enum track_item alloc; 7501 int node; 7502 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 7503 sizeof(struct loc_track)); 7504 struct kmem_cache *s = file_inode(filep)->i_private; 7505 unsigned long *obj_map; 7506 7507 if (!t) 7508 return -ENOMEM; 7509 7510 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 7511 if (!obj_map) { 7512 seq_release_private(inode, filep); 7513 return -ENOMEM; 7514 } 7515 7516 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 7517 alloc = TRACK_ALLOC; 7518 else 7519 alloc = TRACK_FREE; 7520 7521 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 7522 bitmap_free(obj_map); 7523 seq_release_private(inode, filep); 7524 return -ENOMEM; 7525 } 7526 7527 for_each_kmem_cache_node(s, node, n) { 7528 unsigned long flags; 7529 struct slab *slab; 7530 7531 if (!node_nr_slabs(n)) 7532 continue; 7533 7534 spin_lock_irqsave(&n->list_lock, flags); 7535 list_for_each_entry(slab, &n->partial, slab_list) 7536 process_slab(t, s, slab, alloc, obj_map); 7537 list_for_each_entry(slab, &n->full, slab_list) 7538 process_slab(t, s, slab, alloc, obj_map); 7539 spin_unlock_irqrestore(&n->list_lock, flags); 7540 } 7541 7542 /* Sort locations by count */ 7543 sort_r(t->loc, t->count, sizeof(struct location), 7544 cmp_loc_by_count, NULL, NULL); 7545 7546 bitmap_free(obj_map); 7547 return 0; 7548 } 7549 7550 static int slab_debug_trace_release(struct inode *inode, struct file *file) 7551 { 7552 struct seq_file *seq = file->private_data; 7553 struct loc_track *t = seq->private; 7554 7555 free_loc_track(t); 7556 return seq_release_private(inode, file); 7557 } 7558 7559 static const struct file_operations slab_debugfs_fops = { 7560 .open = slab_debug_trace_open, 7561 .read = seq_read, 7562 .llseek = seq_lseek, 7563 .release = slab_debug_trace_release, 7564 }; 7565 7566 static void debugfs_slab_add(struct kmem_cache *s) 7567 { 7568 struct dentry *slab_cache_dir; 7569 7570 if (unlikely(!slab_debugfs_root)) 7571 return; 7572 7573 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 7574 7575 debugfs_create_file("alloc_traces", 0400, 7576 slab_cache_dir, s, &slab_debugfs_fops); 7577 7578 debugfs_create_file("free_traces", 0400, 7579 slab_cache_dir, s, &slab_debugfs_fops); 7580 } 7581 7582 void debugfs_slab_release(struct kmem_cache *s) 7583 { 7584 debugfs_lookup_and_remove(s->name, slab_debugfs_root); 7585 } 7586 7587 static int __init slab_debugfs_init(void) 7588 { 7589 struct kmem_cache *s; 7590 7591 slab_debugfs_root = debugfs_create_dir("slab", NULL); 7592 7593 list_for_each_entry(s, &slab_caches, list) 7594 if (s->flags & SLAB_STORE_USER) 7595 debugfs_slab_add(s); 7596 7597 return 0; 7598 7599 } 7600 __initcall(slab_debugfs_init); 7601 #endif 7602 /* 7603 * The /proc/slabinfo ABI 7604 */ 7605 #ifdef CONFIG_SLUB_DEBUG 7606 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 
7607 { 7608 unsigned long nr_slabs = 0; 7609 unsigned long nr_objs = 0; 7610 unsigned long nr_free = 0; 7611 int node; 7612 struct kmem_cache_node *n; 7613 7614 for_each_kmem_cache_node(s, node, n) { 7615 nr_slabs += node_nr_slabs(n); 7616 nr_objs += node_nr_objs(n); 7617 nr_free += count_partial_free_approx(n); 7618 } 7619 7620 sinfo->active_objs = nr_objs - nr_free; 7621 sinfo->num_objs = nr_objs; 7622 sinfo->active_slabs = nr_slabs; 7623 sinfo->num_slabs = nr_slabs; 7624 sinfo->objects_per_slab = oo_objects(s->oo); 7625 sinfo->cache_order = oo_order(s->oo); 7626 } 7627 #endif /* CONFIG_SLUB_DEBUG */ 7628