1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * SLUB: A slab allocator with low overhead percpu array caches and mostly
4 * lockless freeing of objects to slabs in the slowpath.
5 *
6 * The allocator synchronizes using a local trylock for the percpu sheaves in
7 * the fastpath, and cmpxchg_double (or a bit spinlock) for slowpath freeing.
8 * Uses a centralized lock to manage a pool of partial slabs.
9 *
10 * (C) 2007 SGI, Christoph Lameter
11 * (C) 2011 Linux Foundation, Christoph Lameter
12 * (C) 2025 SUSE, Vlastimil Babka
13 */
14
15 #include <linux/mm.h>
16 #include <linux/swap.h> /* mm_account_reclaimed_pages() */
17 #include <linux/module.h>
18 #include <linux/bit_spinlock.h>
19 #include <linux/interrupt.h>
20 #include <linux/swab.h>
21 #include <linux/bitops.h>
22 #include <linux/slab.h>
23 #include "slab.h"
24 #include <linux/vmalloc.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kasan.h>
28 #include <linux/node.h>
29 #include <linux/kmsan.h>
30 #include <linux/cpu.h>
31 #include <linux/cpuset.h>
32 #include <linux/mempolicy.h>
33 #include <linux/ctype.h>
34 #include <linux/stackdepot.h>
35 #include <linux/debugobjects.h>
36 #include <linux/kallsyms.h>
37 #include <linux/kfence.h>
38 #include <linux/memory.h>
39 #include <linux/math64.h>
40 #include <linux/fault-inject.h>
41 #include <linux/kmemleak.h>
42 #include <linux/stacktrace.h>
43 #include <linux/prefetch.h>
44 #include <linux/memcontrol.h>
45 #include <linux/random.h>
46 #include <linux/prandom.h>
47 #include <kunit/test.h>
48 #include <kunit/test-bug.h>
49 #include <linux/sort.h>
50 #include <linux/irq_work.h>
51 #include <linux/kprobes.h>
52 #include <linux/debugfs.h>
53 #include <trace/events/kmem.h>
54
55 #include "internal.h"
56
57 /*
58 * Lock order:
59 * 0. cpu_hotplug_lock
60 * 1. slab_mutex (Global Mutex)
61 * 2a. kmem_cache->cpu_sheaves->lock (Local trylock)
62 * 2b. barn->lock (Spinlock)
63 * 2c. node->list_lock (Spinlock)
64 * 3. slab_lock(slab) (Only on some arches)
65 * 4. object_map_lock (Only for debugging)
66 *
67 * slab_mutex
68 *
69 * The role of the slab_mutex is to protect the list of all the slabs
70 * and to synchronize major metadata changes to slab cache structures.
71 * Also synchronizes memory hotplug callbacks.
72 *
73 * slab_lock
74 *
75 * The slab_lock is a wrapper around the page lock, thus it is a bit
76 * spinlock.
77 *
78 * The slab_lock is only used on arches that do not have the ability
79 * to do a cmpxchg_double. It only protects:
80 *
81 * A. slab->freelist -> List of free objects in a slab
82 * B. slab->inuse -> Number of objects in use
83 * C. slab->objects -> Number of objects in slab
84 * D. slab->frozen -> frozen state
85 *
86 * SL_partial slabs
87 *
88 * Slabs on node partial list have at least one free object. A limited number
89 * of slabs on the list can be fully free (slab->inuse == 0), until we start
90 * discarding them. These slabs are marked with SL_partial, and the flag is
91 * cleared while removing them, usually to grab their freelist afterwards.
92 * This clearing also exempts them from list management. Please see
93 * __slab_free() for more details.
94 *
95 * Full slabs
96 *
97 * For caches without debugging enabled, full slabs (slab->inuse ==
98 * slab->objects and slab->freelist == NULL) are not placed on any list.
99 * The __slab_free() freeing the first object from such a slab will place
100 * it on the partial list. Caches with debugging enabled place such slab
101 * on the full list and use different allocation and freeing paths.
102 *
103 * Frozen slabs
104 *
105 * If a slab is frozen then it is exempt from list management. It is used to
106 * indicate a slab that has failed consistency checks and thus cannot be
107 * allocated from anymore - it is also marked as full. Any previously
108 * allocated objects will be simply leaked upon freeing instead of attempting
109 * to modify the potentially corrupted freelist and metadata.
110 *
111 * To sum up, the current scheme is:
112 * - node partial slab: SL_partial && !full && !frozen
113 * - taken off partial list: !SL_partial && !full && !frozen
114 * - full slab, not on any list: !SL_partial && full && !frozen
115 * - frozen due to inconsistency: !SL_partial && full && frozen
116 *
117 * node->list_lock (spinlock)
118 *
119 * The list_lock protects the partial and full list on each node and
120 * the partial slab counter. If taken then no new slabs may be added or
121 * removed from the lists nor may the number of partial slabs be modified.
122 * (Note that the total number of slabs is an atomic value that may be
123 * modified without taking the list lock).
124 *
125 * The list_lock is a centralized lock and thus we avoid taking it as
126 * much as possible. As long as SLUB does not have to handle partial
127 * slabs, operations can continue without any centralized lock.
128 *
129 * For debug caches, all allocations are forced to go through a list_lock
130 * protected region to serialize against concurrent validation.
131 *
132 * cpu_sheaves->lock (local_trylock)
133 *
134 * This lock protects fastpath operations on the percpu sheaves. On !RT it
135 * only disables preemption and does no atomic operations. As long as the main
136 * or spare sheaf can handle the allocation or free, there is no other
137 * overhead.
138 *
139 * barn->lock (spinlock)
140 *
141 * This lock protects the operations on per-NUMA-node barn. It can quickly
142 * serve an empty or full sheaf if available, and avoid more expensive refill
143 * or flush operation.
144 *
145 * Lockless freeing
146 *
147 * Objects may have to be freed to their slabs when they are from a remote
148 * node (where we want to avoid filling local sheaves with remote objects)
149 * or when there are too many full sheaves. On architectures supporting
150 * cmpxchg_double this is done by a lockless update of slab's freelist and
151 * counters, otherwise slab_lock is taken. This only needs to take the
152 * list_lock if it's a first free to a full slab, or when a slab becomes empty
153 * after the free.
154 *
155 * irq, preemption, migration considerations
156 *
157 * Interrupts are disabled as part of list_lock or barn lock operations, or
158 * around the slab_lock operation, in order to make the slab allocator safe
159 * to use in the context of an irq.
160 * Preemption is disabled as part of local_trylock operations.
161 * kmalloc_nolock() and kfree_nolock() are safe in NMI context but see
162 * their limitations.
163 *
164 * SLUB assigns two object arrays called sheaves for caching allocations and
165 * frees on each cpu, with a NUMA node shared barn for balancing between cpus.
166 * Allocations and frees are primarily served from these sheaves.
167 *
168 * Slabs with free elements are kept on a partial list and during regular
169 * operations no list for full slabs is used. If an object in a full slab is
170 * freed then the slab will show up again on the partial lists.
171 * We track full slabs for debugging purposes though because otherwise we
172 * cannot scan all objects.
173 *
174 * Slabs are freed when they become empty. Teardown and setup is minimal so we
175 * rely on the page allocator's per-cpu caches for fast frees and allocs.
176 *
177 * SLAB_DEBUG_FLAGS Slab requires special handling due to debug
178 * options set. This moves slab handling out of
179 * the fast path and disables lockless freelists.
180 */
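
/*
 * Illustrative sketch of the nesting implied by the lock order above
 * (hypothetical caller, not actual allocator code; error handling and the
 * irqsave variants used in practice are omitted):
 *
 *	if (local_trylock(&s->cpu_sheaves->lock)) {	// 2a
 *		spin_lock(&barn->lock);			// 2b nests under 2a
 *		...
 *		spin_unlock(&barn->lock);
 *		local_unlock(&s->cpu_sheaves->lock);
 *	}
 *
 * Taking the locks in the opposite order would risk a deadlock against this
 * path.
 */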
181
182 /**
183 * enum slab_flags - How the slab flags bits are used.
184 * @SL_locked: Is locked with slab_lock()
185 * @SL_partial: On the per-node partial list
186 * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
187 *
188 * The slab flags share space with the page flags but some bits have
189 * different interpretations. The high bits are used for information
190 * like zone/node/section.
191 */
192 enum slab_flags {
193 SL_locked = PG_locked,
194 SL_partial = PG_workingset, /* Historical reasons for this bit */
195 SL_pfmemalloc = PG_active, /* Historical reasons for this bit */
196 };
197
198 #ifndef CONFIG_SLUB_TINY
199 #define __fastpath_inline __always_inline
200 #else
201 #define __fastpath_inline
202 #endif
203
204 #ifdef CONFIG_SLUB_DEBUG
205 #ifdef CONFIG_SLUB_DEBUG_ON
206 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
207 #else
208 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
209 #endif
210 #endif /* CONFIG_SLUB_DEBUG */
211
212 #ifdef CONFIG_NUMA
213 static DEFINE_STATIC_KEY_FALSE(strict_numa);
214 #endif
215
216 /* Structure holding parameters for get_from_partial() call chain */
217 struct partial_context {
218 gfp_t flags;
219 unsigned int orig_size;
220 };
221
222 /* Structure holding parameters for get_partial_node_bulk() */
223 struct partial_bulk_context {
224 gfp_t flags;
225 unsigned int min_objects;
226 unsigned int max_objects;
227 struct list_head slabs;
228 };
229
230 static inline bool kmem_cache_debug(struct kmem_cache *s)
231 {
232 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
233 }
234
235 void *fixup_red_left(struct kmem_cache *s, void *p)
236 {
237 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
238 p += s->red_left_pad;
239
240 return p;
241 }
242
243 /*
244 * Issues still to be resolved:
245 *
246 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
247 *
248 * - Variable sizing of the per node arrays
249 */
250
251 /* Enable to log cmpxchg failures */
252 #undef SLUB_DEBUG_CMPXCHG
253
254 #ifndef CONFIG_SLUB_TINY
255 /*
256 * Minimum number of partial slabs. These will be left on the partial
257 * lists even if they are empty. kmem_cache_shrink may reclaim them.
258 */
259 #define MIN_PARTIAL 5
260
261 /*
262 * Maximum number of desirable partial slabs.
263 * The existence of more partial slabs makes kmem_cache_shrink
264 * sort the partial list by the number of objects in use.
265 */
266 #define MAX_PARTIAL 10
267 #else
268 #define MIN_PARTIAL 0
269 #define MAX_PARTIAL 0
270 #endif
271
272 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
273 SLAB_POISON | SLAB_STORE_USER)
274
275 /*
276 * These debug flags cannot use CMPXCHG because there might be consistency
277 * issues when checking or reading debug information
278 */
279 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
280 SLAB_TRACE)
281
282
283 /*
284 * Debugging flags that require metadata to be stored in the slab. These get
285 * disabled when slab_debug=O is used and a cache's min order increases with
286 * metadata.
287 */
288 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
289
290 #define OO_SHIFT 16
291 #define OO_MASK ((1 << OO_SHIFT) - 1)
292 #define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
293
294 /* Internal SLUB flags */
295 /* Poison object */
296 #define __OBJECT_POISON __SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
297 /* Use cmpxchg_double */
298
299 #ifdef system_has_freelist_aba
300 #define __CMPXCHG_DOUBLE __SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
301 #else
302 #define __CMPXCHG_DOUBLE __SLAB_FLAG_UNUSED
303 #endif
304
305 /*
306 * Tracking user of a slab.
307 */
308 #define TRACK_ADDRS_COUNT 16
309 struct track {
310 unsigned long addr; /* Called from address */
311 #ifdef CONFIG_STACKDEPOT
312 depot_stack_handle_t handle;
313 #endif
314 int cpu; /* Was running on cpu */
315 int pid; /* Pid context */
316 unsigned long when; /* When did the operation occur */
317 };
318
319 enum track_item { TRACK_ALLOC, TRACK_FREE };
320
321 #ifdef SLAB_SUPPORTS_SYSFS
322 static int sysfs_slab_add(struct kmem_cache *);
323 #else
324 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
325 #endif
326
327 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
328 static void debugfs_slab_add(struct kmem_cache *);
329 #else
330 static inline void debugfs_slab_add(struct kmem_cache *s) { }
331 #endif
332
333 enum add_mode {
334 ADD_TO_HEAD,
335 ADD_TO_TAIL,
336 };
337
338 enum stat_item {
339 ALLOC_FASTPATH, /* Allocation from percpu sheaves */
340 ALLOC_SLOWPATH, /* Allocation from partial or new slab */
341 FREE_RCU_SHEAF, /* Free to rcu_free sheaf */
342 FREE_RCU_SHEAF_FAIL, /* Failed to free to a rcu_free sheaf */
343 FREE_FASTPATH, /* Free to percpu sheaves */
344 FREE_SLOWPATH, /* Free to a slab */
345 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
346 FREE_REMOVE_PARTIAL, /* Freeing removes last object */
347 ALLOC_SLAB, /* New slab acquired from page allocator */
348 ALLOC_NODE_MISMATCH, /* Requested node different from cpu sheaf */
349 FREE_SLAB, /* Slab freed to the page allocator */
350 ORDER_FALLBACK, /* Number of times fallback was necessary */
351 CMPXCHG_DOUBLE_FAIL, /* Failures of slab freelist update */
352 SHEAF_FLUSH, /* Objects flushed from a sheaf */
353 SHEAF_REFILL, /* Objects refilled to a sheaf */
354 SHEAF_ALLOC, /* Allocation of an empty sheaf */
355 SHEAF_FREE, /* Freeing of an empty sheaf */
356 BARN_GET, /* Got full sheaf from barn */
357 BARN_GET_FAIL, /* Failed to get full sheaf from barn */
358 BARN_PUT, /* Put full sheaf to barn */
359 BARN_PUT_FAIL, /* Failed to put full sheaf to barn */
360 SHEAF_PREFILL_FAST, /* Sheaf prefill grabbed the spare sheaf */
361 SHEAF_PREFILL_SLOW, /* Sheaf prefill found no spare sheaf */
362 SHEAF_PREFILL_OVERSIZE, /* Allocation of oversize sheaf for prefill */
363 SHEAF_RETURN_FAST, /* Sheaf return reattached spare sheaf */
364 SHEAF_RETURN_SLOW, /* Sheaf return could not reattach spare */
365 NR_SLUB_STAT_ITEMS
366 };
367
368 #ifdef CONFIG_SLUB_STATS
369 struct kmem_cache_stats {
370 unsigned int stat[NR_SLUB_STAT_ITEMS];
371 };
372 #endif
373
374 static inline void stat(const struct kmem_cache *s, enum stat_item si)
375 {
376 #ifdef CONFIG_SLUB_STATS
377 /*
378 * The rmw is racy on a preemptible kernel but this is acceptable, so
379 * avoid this_cpu_add()'s irq-disable overhead.
380 */
381 raw_cpu_inc(s->cpu_stats->stat[si]);
382 #endif
383 }
384
385 static inline
386 void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
387 {
388 #ifdef CONFIG_SLUB_STATS
389 raw_cpu_add(s->cpu_stats->stat[si], v);
390 #endif
391 }
392
393 #define MAX_FULL_SHEAVES 10
394 #define MAX_EMPTY_SHEAVES 10
395
396 struct node_barn {
397 spinlock_t lock;
398 struct list_head sheaves_full;
399 struct list_head sheaves_empty;
400 unsigned int nr_full;
401 unsigned int nr_empty;
402 };
403
404 struct slab_sheaf {
405 union {
406 struct rcu_head rcu_head;
407 struct list_head barn_list;
408 /* only used for prefilled sheaves */
409 struct {
410 unsigned int capacity;
411 bool pfmemalloc;
412 };
413 };
414 struct kmem_cache *cache;
415 unsigned int size;
416 int node; /* only used for rcu_sheaf */
417 void *objects[];
418 };
419
420 struct slub_percpu_sheaves {
421 local_trylock_t lock;
422 struct slab_sheaf *main; /* never NULL when unlocked */
423 struct slab_sheaf *spare; /* empty or full, may be NULL */
424 struct slab_sheaf *rcu_free; /* for batching kfree_rcu() */
425 };
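
/*
 * The main sheaf behaves as a simple LIFO stack of object pointers.
 * Simplified sketch of how the fastpath uses it (illustrative only; the real
 * paths add locking, NUMA checks and statistics):
 *
 *	// allocation: pop the most recently freed object
 *	object = pcs->main->objects[--pcs->main->size];
 *
 *	// free: push the object back
 *	pcs->main->objects[pcs->main->size++] = object;
 */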
426
427 /*
428 * The slab lists for all objects.
429 */
430 struct kmem_cache_node {
431 spinlock_t list_lock;
432 unsigned long nr_partial;
433 struct list_head partial;
434 #ifdef CONFIG_SLUB_DEBUG
435 atomic_long_t nr_slabs;
436 atomic_long_t total_objects;
437 struct list_head full;
438 #endif
439 };
440
441 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
442 {
443 return s->per_node[node].node;
444 }
445
446 static inline struct node_barn *get_barn_node(struct kmem_cache *s, int node)
447 {
448 return s->per_node[node].barn;
449 }
450
451 /*
452 * Get the barn of the current cpu's NUMA node. It may be a memoryless node.
453 */
454 static inline struct node_barn *get_barn(struct kmem_cache *s)
455 {
456 return get_barn_node(s, numa_node_id());
457 }
458
459 /*
460 * Iterator over all nodes. The body will be executed for each node that has
461 * a kmem_cache_node structure allocated (which is true for all online nodes)
462 */
463 #define for_each_kmem_cache_node(__s, __node, __n) \
464 for (__node = 0; __node < nr_node_ids; __node++) \
465 if ((__n = get_node(__s, __node)))
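
/*
 * Usage sketch (hypothetical caller): sum the partial slab counts over all
 * nodes that have a kmem_cache_node allocated:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */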
466
467 /*
468 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
469 * Corresponds to node_state[N_MEMORY], but can temporarily
470 * differ during memory hotplug/hotremove operations.
471 * Protected by slab_mutex.
472 */
473 static nodemask_t slab_nodes;
474
475 /*
476 * Similar to slab_nodes but for where we have node_barn allocated.
477 * Corresponds to N_ONLINE nodes.
478 */
479 static nodemask_t slab_barn_nodes;
480
481 /*
482 * Workqueue used for flushing cpu and kfree_rcu sheaves.
483 */
484 static struct workqueue_struct *flushwq;
485
486 struct slub_flush_work {
487 struct work_struct work;
488 struct kmem_cache *s;
489 bool skip;
490 };
491
492 static DEFINE_MUTEX(flush_lock);
493 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
494
495 /********************************************************************
496 * Core slab cache functions
497 *******************************************************************/
498
499 /*
500 * Returns freelist pointer (ptr). With hardening, this is obfuscated
501 * with an XOR of the address where the pointer is held and a per-cache
502 * random number.
503 */
504 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
505 void *ptr, unsigned long ptr_addr)
506 {
507 unsigned long encoded;
508
509 #ifdef CONFIG_SLAB_FREELIST_HARDENED
510 encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
511 #else
512 encoded = (unsigned long)ptr;
513 #endif
514 return (freeptr_t){.v = encoded};
515 }
516
517 static inline void *freelist_ptr_decode(const struct kmem_cache *s,
518 freeptr_t ptr, unsigned long ptr_addr)
519 {
520 void *decoded;
521
522 #ifdef CONFIG_SLAB_FREELIST_HARDENED
523 decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
524 #else
525 decoded = (void *)ptr.v;
526 #endif
527 return decoded;
528 }
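
/*
 * Encoding and decoding are symmetric: for the same cache and the same
 * storage address, decoding an encoded pointer yields the original value
 * whether or not hardening is enabled. Illustrative round trip:
 *
 *	freeptr_t enc = freelist_ptr_encode(s, object, ptr_addr);
 *	void *dec = freelist_ptr_decode(s, enc, ptr_addr);
 *	// dec == object
 *
 * With CONFIG_SLAB_FREELIST_HARDENED, recovering or forging a freelist
 * pointer requires knowing both s->random and the address it is stored at.
 */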
529
530 static inline void *get_freepointer(struct kmem_cache *s, void *object)
531 {
532 unsigned long ptr_addr;
533 freeptr_t p;
534
535 object = kasan_reset_tag(object);
536 ptr_addr = (unsigned long)object + s->offset;
537 p = *(freeptr_t *)(ptr_addr);
538 return freelist_ptr_decode(s, p, ptr_addr);
539 }
540
541 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
542 {
543 unsigned long freeptr_addr = (unsigned long)object + s->offset;
544
545 #ifdef CONFIG_SLAB_FREELIST_HARDENED
546 BUG_ON(object == fp); /* naive detection of double free or corruption */
547 #endif
548
549 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
550 *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
551 }
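
/*
 * Freelist links are built by storing the address of the next free object
 * at s->offset inside each object. Sketch (illustrative): linking free
 * objects a and b and terminating the chain:
 *
 *	set_freepointer(s, a, b);
 *	set_freepointer(s, b, NULL);
 *	// now get_freepointer(s, a) == b and get_freepointer(s, b) == NULL
 */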
552
553 /*
554 * See comment in calculate_sizes().
555 */
556 static inline bool freeptr_outside_object(struct kmem_cache *s)
557 {
558 return s->offset >= s->inuse;
559 }
560
561 /*
562 * Return offset of the end of info block which is inuse + free pointer if
563 * not overlapping with object.
564 */
565 static inline unsigned int get_info_end(struct kmem_cache *s)
566 {
567 if (freeptr_outside_object(s))
568 return s->inuse + sizeof(void *);
569 else
570 return s->inuse;
571 }
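
/*
 * Example: for a cache with s->inuse == 64 and the free pointer stored
 * outside the object (s->offset >= s->inuse), get_info_end() returns
 * 64 + sizeof(void *); with an overlapping free pointer it is just 64.
 */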
572
573 /* Loop over all objects in a slab */
574 #define for_each_object(__p, __s, __addr, __objects) \
575 for (__p = fixup_red_left(__s, __addr); \
576 __p < (__addr) + (__objects) * (__s)->size; \
577 __p += (__s)->size)
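
/*
 * Usage sketch (hypothetical caller; do_something() is a placeholder):
 * visit every object in a slab, starting past the left redzone if any:
 *
 *	void *p, *addr = slab_address(slab);
 *
 *	for_each_object(p, s, addr, slab->objects)
 *		do_something(s, p);
 */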
578
579 static inline unsigned int order_objects(unsigned int order, unsigned int size)
580 {
581 return ((unsigned int)PAGE_SIZE << order) / size;
582 }
583
584 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
585 unsigned int size)
586 {
587 struct kmem_cache_order_objects x = {
588 (order << OO_SHIFT) + order_objects(order, size)
589 };
590
591 return x;
592 }
593
594 static inline unsigned int oo_order(struct kmem_cache_order_objects x)
595 {
596 return x.x >> OO_SHIFT;
597 }
598
599 static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
600 {
601 return x.x & OO_MASK;
602 }
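
/*
 * Example (assuming 4K pages): for order-3 slabs of 512-byte objects,
 * order_objects(3, 512) == (4096 << 3) / 512 == 64, so oo_make(3, 512)
 * packs (3 << OO_SHIFT) + 64 into one word, and oo_order()/oo_objects()
 * extract the two halves again.
 */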
603
604 /*
605 * If network-based swap is enabled, slub must keep track of whether memory
606 * was allocated from pfmemalloc reserves.
607 */
608 static inline bool slab_test_pfmemalloc(const struct slab *slab)
609 {
610 return test_bit(SL_pfmemalloc, &slab->flags.f);
611 }
612
613 static inline void slab_set_pfmemalloc(struct slab *slab)
614 {
615 set_bit(SL_pfmemalloc, &slab->flags.f);
616 }
617
618 static inline void __slab_clear_pfmemalloc(struct slab *slab)
619 {
620 __clear_bit(SL_pfmemalloc, &slab->flags.f);
621 }
622
623 /*
624 * Per slab locking using the pagelock
625 */
626 static __always_inline void slab_lock(struct slab *slab)
627 {
628 bit_spin_lock(SL_locked, &slab->flags.f);
629 }
630
631 static __always_inline void slab_unlock(struct slab *slab)
632 {
633 bit_spin_unlock(SL_locked, &slab->flags.f);
634 }
635
636 static inline bool
637 __update_freelist_fast(struct slab *slab, struct freelist_counters *old,
638 struct freelist_counters *new)
639 {
640 #ifdef system_has_freelist_aba
641 return try_cmpxchg_freelist(&slab->freelist_counters,
642 &old->freelist_counters,
643 new->freelist_counters);
644 #else
645 return false;
646 #endif
647 }
648
649 static inline bool
650 __update_freelist_slow(struct slab *slab, struct freelist_counters *old,
651 struct freelist_counters *new)
652 {
653 bool ret = false;
654
655 slab_lock(slab);
656 if (slab->freelist == old->freelist &&
657 slab->counters == old->counters) {
658 slab->freelist = new->freelist;
659 /* prevent tearing for the read in get_partial_node_bulk() */
660 WRITE_ONCE(slab->counters, new->counters);
661 ret = true;
662 }
663 slab_unlock(slab);
664
665 return ret;
666 }
667
668 /*
669 * Interrupts must be disabled (for the fallback code to work right), typically
670 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
671 * part of bit_spin_lock(), is sufficient because the policy is not to allow any
672 * allocation/free operation in hardirq context. Therefore nothing can
673 * interrupt the operation.
674 */
675 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
676 struct freelist_counters *old, struct freelist_counters *new, const char *n)
677 {
678 bool ret;
679
680 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
681 lockdep_assert_irqs_disabled();
682
683 if (s->flags & __CMPXCHG_DOUBLE)
684 ret = __update_freelist_fast(slab, old, new);
685 else
686 ret = __update_freelist_slow(slab, old, new);
687
688 if (likely(ret))
689 return true;
690
691 cpu_relax();
692 stat(s, CMPXCHG_DOUBLE_FAIL);
693
694 #ifdef SLUB_DEBUG_CMPXCHG
695 pr_info("%s %s: cmpxchg double redo ", n, s->name);
696 #endif
697
698 return false;
699 }
700
701 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
702 struct freelist_counters *old, struct freelist_counters *new, const char *n)
703 {
704 bool ret;
705
706 if (s->flags & __CMPXCHG_DOUBLE) {
707 ret = __update_freelist_fast(slab, old, new);
708 } else {
709 unsigned long flags;
710
711 local_irq_save(flags);
712 ret = __update_freelist_slow(slab, old, new);
713 local_irq_restore(flags);
714 }
715 if (likely(ret))
716 return true;
717
718 cpu_relax();
719 stat(s, CMPXCHG_DOUBLE_FAIL);
720
721 #ifdef SLUB_DEBUG_CMPXCHG
722 pr_info("%s %s: cmpxchg double redo ", n, s->name);
723 #endif
724
725 return false;
726 }
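
/*
 * Callers typically snapshot the current freelist and counters, construct
 * the desired new values and retry until the update succeeds. Simplified
 * sketch of freeing one object (illustrative only, not the actual
 * __slab_free() code):
 *
 *	struct freelist_counters old, new;
 *
 *	do {
 *		old.freelist = slab->freelist;
 *		old.counters = slab->counters;
 *		new.counters = old.counters;	// then adjust inuse etc.
 *		set_freepointer(s, object, old.freelist);
 *		new.freelist = object;
 *	} while (!slab_update_freelist(s, slab, &old, &new, "sketch"));
 */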
727
728 /*
729 * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API
730 * family will round up the real request size to these fixed ones, so
731 * there can be extra space beyond what was requested. Save the original
732 * request size in the metadata area, for better debugging and sanity checks.
733 */
734 static inline void set_orig_size(struct kmem_cache *s,
735 void *object, unsigned long orig_size)
736 {
737 void *p = kasan_reset_tag(object);
738
739 if (!slub_debug_orig_size(s))
740 return;
741
742 p += get_info_end(s);
743 p += sizeof(struct track) * 2;
744
745 *(unsigned long *)p = orig_size;
746 }
747
748 static inline unsigned long get_orig_size(struct kmem_cache *s, void *object)
749 {
750 void *p = kasan_reset_tag(object);
751
752 if (is_kfence_address(object))
753 return kfence_ksize(object);
754
755 if (!slub_debug_orig_size(s))
756 return s->object_size;
757
758 p += get_info_end(s);
759 p += sizeof(struct track) * 2;
760
761 return *(unsigned long *)p;
762 }
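
/*
 * Example: with slub_debug=U enabled on a kmalloc-128 cache, a
 * kmalloc(100, ...) allocation is served from a 128-byte object but 100 is
 * recorded here, so debug checks and reports can use the exact requested
 * size rather than the rounded-up one.
 */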
763
764 #ifdef CONFIG_SLAB_OBJ_EXT
765
766 /*
767 * Check if memory cgroup or memory allocation profiling is enabled.
768 * If enabled, SLUB tries to reduce memory overhead of accounting
769 * slab objects. If neither is enabled when this function is called,
770 * the optimization is simply skipped to avoid affecting caches that do not
771 * need slabobj_ext metadata.
772 *
773 * However, this may disable optimization when memory cgroup or memory
774 * allocation profiling is used, but slabs are created too early
775 * even before those subsystems are initialized.
776 */
777 static inline bool need_slab_obj_exts(struct kmem_cache *s)
778 {
779 if (s->flags & SLAB_NO_OBJ_EXT)
780 return false;
781
782 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
783 return true;
784
785 if (mem_alloc_profiling_enabled())
786 return true;
787
788 return false;
789 }
790
791 static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
792 {
793 return sizeof(struct slabobj_ext) * slab->objects;
794 }
795
796 static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
797 struct slab *slab)
798 {
799 unsigned long objext_offset;
800
801 objext_offset = s->size * slab->objects;
802 objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
803 return objext_offset;
804 }
805
806 static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
807 struct slab *slab)
808 {
809 unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
810 unsigned long objext_size = obj_exts_size_in_slab(slab);
811
812 return objext_offset + objext_size <= slab_size(slab);
813 }
814
815 static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
816 {
817 unsigned long obj_exts;
818 unsigned long start;
819 unsigned long end;
820
821 obj_exts = slab_obj_exts(slab);
822 if (!obj_exts)
823 return false;
824
825 start = (unsigned long)slab_address(slab);
826 end = start + slab_size(slab);
827 return (obj_exts >= start) && (obj_exts < end);
828 }
829 #else
830 static inline bool need_slab_obj_exts(struct kmem_cache *s)
831 {
832 return false;
833 }
834
835 static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
836 {
837 return 0;
838 }
839
840 static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
841 struct slab *slab)
842 {
843 return 0;
844 }
845
846 static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
847 struct slab *slab)
848 {
849 return false;
850 }
851
852 static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
853 {
854 return false;
855 }
856
857 #endif
858
859 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
860 static bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
861 {
862 /*
863 * Note we cannot rely on the SLAB_OBJ_EXT_IN_OBJ flag here and need to
864 * check the stride. A cache can have SLAB_OBJ_EXT_IN_OBJ set, but
865 * allocations within the slab's leftover space are preferred, and those
866 * may or may not be possible depending on the particular slab's size.
867 */
868 return obj_exts_in_slab(s, slab) &&
869 (slab_get_stride(slab) == s->size);
870 }
871
872 static unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
873 {
874 unsigned int offset = get_info_end(s);
875
876 if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
877 offset += sizeof(struct track) * 2;
878
879 if (slub_debug_orig_size(s))
880 offset += sizeof(unsigned long);
881
882 offset += kasan_metadata_size(s, false);
883
884 return offset;
885 }
886 #else
887 static inline bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
888 {
889 return false;
890 }
891
892 static inline unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
893 {
894 return 0;
895 }
896 #endif
897
898 #ifdef CONFIG_SLUB_DEBUG
899
900 /*
901 * For debugging context when we want to check if the struct slab pointer
902 * appears to be valid.
903 */
904 static inline bool validate_slab_ptr(struct slab *slab)
905 {
906 return PageSlab(slab_page(slab));
907 }
908
909 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
910 static DEFINE_SPINLOCK(object_map_lock);
911
912 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
913 struct slab *slab)
914 {
915 void *addr = slab_address(slab);
916 void *p;
917
918 bitmap_zero(obj_map, slab->objects);
919
920 for (p = slab->freelist; p; p = get_freepointer(s, p))
921 set_bit(__obj_to_index(s, addr, p), obj_map);
922 }
923
924 #if IS_ENABLED(CONFIG_KUNIT)
925 static bool slab_add_kunit_errors(void)
926 {
927 struct kunit_resource *resource;
928
929 if (!kunit_get_current_test())
930 return false;
931
932 resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
933 if (!resource)
934 return false;
935
936 (*(int *)resource->data)++;
937 kunit_put_resource(resource);
938 return true;
939 }
940
941 bool slab_in_kunit_test(void)
942 {
943 struct kunit_resource *resource;
944
945 if (!kunit_get_current_test())
946 return false;
947
948 resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
949 if (!resource)
950 return false;
951
952 kunit_put_resource(resource);
953 return true;
954 }
955 #else
956 static inline bool slab_add_kunit_errors(void) { return false; }
957 #endif
958
959 static inline unsigned int size_from_object(struct kmem_cache *s)
960 {
961 if (s->flags & SLAB_RED_ZONE)
962 return s->size - s->red_left_pad;
963
964 return s->size;
965 }
966
967 static inline void *restore_red_left(struct kmem_cache *s, void *p)
968 {
969 if (s->flags & SLAB_RED_ZONE)
970 p -= s->red_left_pad;
971
972 return p;
973 }
974
975 /*
976 * Debug settings:
977 */
978 #if defined(CONFIG_SLUB_DEBUG_ON)
979 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
980 #else
981 static slab_flags_t slub_debug;
982 #endif
983
984 static const char *slub_debug_string __ro_after_init;
985 static int disable_higher_order_debug;
986
987 /*
988 * Object debugging
989 */
990
991 /* Verify that a pointer has an address that is valid within a slab page */
992 static inline int check_valid_pointer(struct kmem_cache *s,
993 struct slab *slab, void *object)
994 {
995 void *base;
996
997 if (!object)
998 return 1;
999
1000 base = slab_address(slab);
1001 object = kasan_reset_tag(object);
1002 object = restore_red_left(s, object);
1003 if (object < base || object >= base + slab->objects * s->size ||
1004 (object - base) % s->size) {
1005 return 0;
1006 }
1007
1008 return 1;
1009 }
1010
1011 static void print_section(char *level, char *text, u8 *addr,
1012 unsigned int length)
1013 {
1014 metadata_access_enable();
1015 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
1016 16, 1, kasan_reset_tag((void *)addr), length, 1);
1017 metadata_access_disable();
1018 }
1019
1020 static struct track *get_track(struct kmem_cache *s, void *object,
1021 enum track_item alloc)
1022 {
1023 struct track *p;
1024
1025 p = object + get_info_end(s);
1026
1027 return kasan_reset_tag(p + alloc);
1028 }
1029
1030 #ifdef CONFIG_STACKDEPOT
1031 static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
1032 {
1033 depot_stack_handle_t handle;
1034 unsigned long entries[TRACK_ADDRS_COUNT];
1035 unsigned int nr_entries;
1036
1037 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
1038 handle = stack_depot_save(entries, nr_entries, gfp_flags);
1039
1040 return handle;
1041 }
1042 #else
1043 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
1044 {
1045 return 0;
1046 }
1047 #endif
1048
1049 static void set_track_update(struct kmem_cache *s, void *object,
1050 enum track_item alloc, unsigned long addr,
1051 depot_stack_handle_t handle)
1052 {
1053 struct track *p = get_track(s, object, alloc);
1054
1055 #ifdef CONFIG_STACKDEPOT
1056 p->handle = handle;
1057 #endif
1058 p->addr = addr;
1059 p->cpu = raw_smp_processor_id();
1060 p->pid = current->pid;
1061 p->when = jiffies;
1062 }
1063
1064 static __always_inline void set_track(struct kmem_cache *s, void *object,
1065 enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
1066 {
1067 depot_stack_handle_t handle = set_track_prepare(gfp_flags);
1068
1069 set_track_update(s, object, alloc, addr, handle);
1070 }
1071
1072 static void init_tracking(struct kmem_cache *s, void *object)
1073 {
1074 struct track *p;
1075
1076 if (!(s->flags & SLAB_STORE_USER))
1077 return;
1078
1079 p = get_track(s, object, TRACK_ALLOC);
1080 memset(p, 0, 2*sizeof(struct track));
1081 }
1082
1083 static void print_track(const char *s, struct track *t, unsigned long pr_time)
1084 {
1085 depot_stack_handle_t handle __maybe_unused;
1086
1087 if (!t->addr)
1088 return;
1089
1090 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
1091 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
1092 #ifdef CONFIG_STACKDEPOT
1093 handle = READ_ONCE(t->handle);
1094 if (handle)
1095 stack_depot_print(handle);
1096 else
1097 pr_err("object allocation/free stack trace missing\n");
1098 #endif
1099 }
1100
1101 void print_tracking(struct kmem_cache *s, void *object)
1102 {
1103 unsigned long pr_time = jiffies;
1104 if (!(s->flags & SLAB_STORE_USER))
1105 return;
1106
1107 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
1108 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
1109 }
1110
1111 static void print_slab_info(const struct slab *slab)
1112 {
1113 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
1114 slab, slab->objects, slab->inuse, slab->freelist,
1115 &slab->flags.f);
1116 }
1117
1118 void skip_orig_size_check(struct kmem_cache *s, const void *object)
1119 {
1120 set_orig_size(s, (void *)object, s->object_size);
1121 }
1122
1123 static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
1124 {
1125 struct va_format vaf;
1126 va_list args;
1127
1128 va_copy(args, argsp);
1129 vaf.fmt = fmt;
1130 vaf.va = &args;
1131 pr_err("=============================================================================\n");
1132 pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
1133 pr_err("-----------------------------------------------------------------------------\n\n");
1134 va_end(args);
1135 }
1136
1137 static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
1138 {
1139 va_list args;
1140
1141 va_start(args, fmt);
1142 __slab_bug(s, fmt, args);
1143 va_end(args);
1144 }
1145
1146 __printf(2, 3)
1147 static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
1148 {
1149 struct va_format vaf;
1150 va_list args;
1151
1152 if (slab_add_kunit_errors())
1153 return;
1154
1155 va_start(args, fmt);
1156 vaf.fmt = fmt;
1157 vaf.va = &args;
1158 pr_err("FIX %s: %pV\n", s->name, &vaf);
1159 va_end(args);
1160 }
1161
1162 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
1163 {
1164 unsigned int off; /* Offset of last byte */
1165 u8 *addr = slab_address(slab);
1166
1167 print_tracking(s, p);
1168
1169 print_slab_info(slab);
1170
1171 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
1172 p, p - addr, get_freepointer(s, p));
1173
1174 if (s->flags & SLAB_RED_ZONE)
1175 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
1176 s->red_left_pad);
1177 else if (p > addr + 16)
1178 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
1179
1180 print_section(KERN_ERR, "Object ", p,
1181 min_t(unsigned int, s->object_size, PAGE_SIZE));
1182 if (s->flags & SLAB_RED_ZONE)
1183 print_section(KERN_ERR, "Redzone ", p + s->object_size,
1184 s->inuse - s->object_size);
1185
1186 off = get_info_end(s);
1187
1188 if (s->flags & SLAB_STORE_USER)
1189 off += 2 * sizeof(struct track);
1190
1191 if (slub_debug_orig_size(s))
1192 off += sizeof(unsigned long);
1193
1194 off += kasan_metadata_size(s, false);
1195
1196 if (obj_exts_in_object(s, slab))
1197 off += sizeof(struct slabobj_ext);
1198
1199 if (off != size_from_object(s))
1200 /* Beginning of the filler is the free pointer */
1201 print_section(KERN_ERR, "Padding ", p + off,
1202 size_from_object(s) - off);
1203 }
1204
1205 static void object_err(struct kmem_cache *s, struct slab *slab,
1206 u8 *object, const char *reason)
1207 {
1208 if (slab_add_kunit_errors())
1209 return;
1210
1211 slab_bug(s, reason);
1212 if (!object || !check_valid_pointer(s, slab, object)) {
1213 print_slab_info(slab);
1214 pr_err("Invalid pointer 0x%p\n", object);
1215 } else {
1216 print_trailer(s, slab, object);
1217 }
1218 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1219
1220 WARN_ON(1);
1221 }
1222
1223 static void __slab_err(struct slab *slab)
1224 {
1225 if (slab_in_kunit_test())
1226 return;
1227
1228 print_slab_info(slab);
1229 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1230
1231 WARN_ON(1);
1232 }
1233
1234 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
1235 const char *fmt, ...)
1236 {
1237 va_list args;
1238
1239 if (slab_add_kunit_errors())
1240 return;
1241
1242 va_start(args, fmt);
1243 __slab_bug(s, fmt, args);
1244 va_end(args);
1245
1246 __slab_err(slab);
1247 }
1248
1249 static void init_object(struct kmem_cache *s, void *object, u8 val)
1250 {
1251 u8 *p = kasan_reset_tag(object);
1252 unsigned int poison_size = s->object_size;
1253
1254 if (s->flags & SLAB_RED_ZONE) {
1255 /*
1256 * Here and below, avoid overwriting the KMSAN shadow. Keeping
1257 * the shadow makes it possible to distinguish uninit-value
1258 * from use-after-free.
1259 */
1260 memset_no_sanitize_memory(p - s->red_left_pad, val,
1261 s->red_left_pad);
1262
1263 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1264 /*
1265 * Redzone the extra allocated space by kmalloc than
1266 * requested, and the poison size will be limited to
1267 * the original request size accordingly.
1268 */
1269 poison_size = get_orig_size(s, object);
1270 }
1271 }
1272
1273 if (s->flags & __OBJECT_POISON) {
1274 memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
1275 memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
1276 }
1277
1278 if (s->flags & SLAB_RED_ZONE)
1279 memset_no_sanitize_memory(p + poison_size, val,
1280 s->inuse - poison_size);
1281 }
1282
1283 static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
1284 void *from, void *to)
1285 {
1286 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1287 memset(from, data, to - from);
1288 }
1289
1290 #ifdef CONFIG_KMSAN
1291 #define pad_check_attributes noinline __no_kmsan_checks
1292 #else
1293 #define pad_check_attributes
1294 #endif
1295
1296 static pad_check_attributes int
1297 check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
1298 u8 *object, const char *what, u8 *start, unsigned int value,
1299 unsigned int bytes, bool slab_obj_print)
1300 {
1301 u8 *fault;
1302 u8 *end;
1303 u8 *addr = slab_address(slab);
1304
1305 metadata_access_enable();
1306 fault = memchr_inv(kasan_reset_tag(start), value, bytes);
1307 metadata_access_disable();
1308 if (!fault)
1309 return 1;
1310
1311 end = start + bytes;
1312 while (end > fault && end[-1] == value)
1313 end--;
1314
1315 if (slab_add_kunit_errors())
1316 goto skip_bug_print;
1317
1318 pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1319 what, fault, end - 1, fault - addr, fault[0], value);
1320
1321 if (slab_obj_print)
1322 object_err(s, slab, object, "Object corrupt");
1323
1324 skip_bug_print:
1325 restore_bytes(s, what, value, fault, end);
1326 return 0;
1327 }
1328
1329 /*
1330 * Object field layout:
1331 *
1332 * [Left redzone padding] (if SLAB_RED_ZONE)
1333 * - Field size: s->red_left_pad
1334 * - Immediately precedes each object when SLAB_RED_ZONE is set.
1335 * - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
1336 * 0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
1337 *
1338 * [Object bytes] (object address starts here)
1339 * - Field size: s->object_size
1340 * - Object payload bytes.
1341 * - If the freepointer may overlap the object, it is stored inside
1342 * the object (typically near the middle).
1343 * - Poisoning uses 0x6b (POISON_FREE) and the last byte is
1344 * 0xa5 (POISON_END) when __OBJECT_POISON is enabled.
1345 *
1346 * [Word-align padding] (right redzone when SLAB_RED_ZONE is set)
1347 * - Field size: s->inuse - s->object_size
1348 * - If redzoning is enabled and ALIGN(size, sizeof(void *)) adds no
1349 * padding, explicitly extend by one word so the right redzone is
1350 * non-empty.
1351 * - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
1352 * 0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
1353 *
1354 * [Metadata starts at object + s->inuse]
1355 * - A. freelist pointer (if freeptr_outside_object)
1356 * - B. alloc tracking (SLAB_STORE_USER)
1357 * - C. free tracking (SLAB_STORE_USER)
1358 * - D. original request size (SLAB_KMALLOC && SLAB_STORE_USER)
1359 * - E. KASAN metadata (if enabled)
1360 *
1361 * [Mandatory padding] (if CONFIG_SLUB_DEBUG && SLAB_RED_ZONE)
1362 * - One mandatory debug word to guarantee a minimum poisoned gap
1363 * between metadata and the next object, independent of alignment.
1364 * - Filled with 0x5a (POISON_INUSE) when SLAB_POISON is set.
1365 * [Final alignment padding]
1366 * - Bytes added by ALIGN(size, s->align) to reach s->size.
1367 * - When the padding is large enough, it can be used to store
1368 * struct slabobj_ext for accounting metadata (obj_exts_in_object()).
1369 * - The remaining bytes (if any) are filled with 0x5a (POISON_INUSE)
1370 * when SLAB_POISON is set.
1371 *
1372 * Notes:
1373 * - Redzones are filled by init_object() with SLUB_RED_ACTIVE/INACTIVE.
1374 * - Object contents are poisoned with POISON_FREE/END when __OBJECT_POISON.
1375 * - The trailing padding is pre-filled with POISON_INUSE by
1376 * setup_slab_debug() when SLAB_POISON is set, and is validated by
1377 * check_pad_bytes().
1378 * - The first object pointer is slab_address(slab) +
1379 * (s->red_left_pad if redzoning); subsequent objects are reached by
1380 * adding s->size each time.
1381 *
1382 * If a slab cache flag relies on specific metadata to exist at a fixed
1383 * offset, the flag must be included in SLAB_NEVER_MERGE to prevent merging.
1384 * Otherwise, the cache would misbehave as s->object_size and s->inuse are
1385 * adjusted during cache merging (see __kmem_cache_alias()).
1386 */
1387 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
1388 {
1389 unsigned long off = get_info_end(s); /* The end of info */
1390
1391 if (s->flags & SLAB_STORE_USER) {
1392 /* We also have user information there */
1393 off += 2 * sizeof(struct track);
1394
1395 if (s->flags & SLAB_KMALLOC)
1396 off += sizeof(unsigned long);
1397 }
1398
1399 off += kasan_metadata_size(s, false);
1400
1401 if (obj_exts_in_object(s, slab))
1402 off += sizeof(struct slabobj_ext);
1403
1404 if (size_from_object(s) == off)
1405 return 1;
1406
1407 return check_bytes_and_report(s, slab, p, "Object padding",
1408 p + off, POISON_INUSE, size_from_object(s) - off, true);
1409 }
1410
1411 /* Check the pad bytes at the end of a slab page */
1412 static pad_check_attributes void
1413 slab_pad_check(struct kmem_cache *s, struct slab *slab)
1414 {
1415 u8 *start;
1416 u8 *fault;
1417 u8 *end;
1418 u8 *pad;
1419 int length;
1420 int remainder;
1421
1422 if (!(s->flags & SLAB_POISON))
1423 return;
1424
1425 start = slab_address(slab);
1426 length = slab_size(slab);
1427 end = start + length;
1428
1429 if (obj_exts_in_slab(s, slab) && !obj_exts_in_object(s, slab)) {
1430 remainder = length;
1431 remainder -= obj_exts_offset_in_slab(s, slab);
1432 remainder -= obj_exts_size_in_slab(slab);
1433 } else {
1434 remainder = length % s->size;
1435 }
1436
1437 if (!remainder)
1438 return;
1439
1440 pad = end - remainder;
1441 metadata_access_enable();
1442 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1443 metadata_access_disable();
1444 if (!fault)
1445 return;
1446 while (end > fault && end[-1] == POISON_INUSE)
1447 end--;
1448
1449 slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1450 fault, end - 1, fault - start);
1451 print_section(KERN_ERR, "Padding ", pad, remainder);
1452 __slab_err(slab);
1453
1454 restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
1455 }
1456
1457 static int check_object(struct kmem_cache *s, struct slab *slab,
1458 void *object, u8 val)
1459 {
1460 u8 *p = object;
1461 u8 *endobject = object + s->object_size;
1462 unsigned int orig_size, kasan_meta_size;
1463 int ret = 1;
1464
1465 if (s->flags & SLAB_RED_ZONE) {
1466 if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1467 object - s->red_left_pad, val, s->red_left_pad, ret))
1468 ret = 0;
1469
1470 if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1471 endobject, val, s->inuse - s->object_size, ret))
1472 ret = 0;
1473
1474 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1475 orig_size = get_orig_size(s, object);
1476
1477 if (s->object_size > orig_size &&
1478 !check_bytes_and_report(s, slab, object,
1479 "kmalloc Redzone", p + orig_size,
1480 val, s->object_size - orig_size, ret)) {
1481 ret = 0;
1482 }
1483 }
1484 } else {
1485 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1486 if (!check_bytes_and_report(s, slab, p, "Alignment padding",
1487 endobject, POISON_INUSE,
1488 s->inuse - s->object_size, ret))
1489 ret = 0;
1490 }
1491 }
1492
1493 if (s->flags & SLAB_POISON) {
1494 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1495 /*
1496 * KASAN can save its free meta data inside of the
1497 * object at offset 0. Thus, skip checking the part of
1498 * the redzone that overlaps with the meta data.
1499 */
1500 kasan_meta_size = kasan_metadata_size(s, true);
1501 if (kasan_meta_size < s->object_size - 1 &&
1502 !check_bytes_and_report(s, slab, p, "Poison",
1503 p + kasan_meta_size, POISON_FREE,
1504 s->object_size - kasan_meta_size - 1, ret))
1505 ret = 0;
1506 if (kasan_meta_size < s->object_size &&
1507 !check_bytes_and_report(s, slab, p, "End Poison",
1508 p + s->object_size - 1, POISON_END, 1, ret))
1509 ret = 0;
1510 }
1511 /*
1512 * check_pad_bytes cleans up on its own.
1513 */
1514 if (!check_pad_bytes(s, slab, p))
1515 ret = 0;
1516 }
1517
1518 /*
1519 * Cannot check freepointer while object is allocated if
1520 * object and freepointer overlap.
1521 */
1522 if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
1523 !check_valid_pointer(s, slab, get_freepointer(s, p))) {
1524 object_err(s, slab, p, "Freepointer corrupt");
1525 /*
1526 * No choice but to zap it and thus lose the remainder
1527 * of the free objects in this slab. May cause
1528 * another error because the object count is now wrong.
1529 */
1530 set_freepointer(s, p, NULL);
1531 ret = 0;
1532 }
1533
1534 return ret;
1535 }
1536
1537 /*
1538 * Checks if the slab state looks sane. Assumes the struct slab pointer
1539 * was either obtained in a way that ensures it's valid, or validated
1540 * by validate_slab_ptr()
1541 */
1542 static int check_slab(struct kmem_cache *s, struct slab *slab)
1543 {
1544 int maxobj;
1545
1546 maxobj = order_objects(slab_order(slab), s->size);
1547 if (slab->objects > maxobj) {
1548 slab_err(s, slab, "objects %u > max %u",
1549 slab->objects, maxobj);
1550 return 0;
1551 }
1552 if (slab->inuse > slab->objects) {
1553 slab_err(s, slab, "inuse %u > max %u",
1554 slab->inuse, slab->objects);
1555 return 0;
1556 }
1557 if (slab->frozen) {
1558 slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
1559 return 0;
1560 }
1561
1562 /* Slab_pad_check fixes things up after itself */
1563 slab_pad_check(s, slab);
1564 return 1;
1565 }
1566
1567 /*
1568 * Determine if a certain object in a slab is on the freelist. Must hold the
1569 * slab lock to guarantee that the chains are in a consistent state.
1570 */
1571 static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
1572 {
1573 int nr = 0;
1574 void *fp;
1575 void *object = NULL;
1576 int max_objects;
1577
1578 fp = slab->freelist;
1579 while (fp && nr <= slab->objects) {
1580 if (fp == search)
1581 return true;
1582 if (!check_valid_pointer(s, slab, fp)) {
1583 if (object) {
1584 object_err(s, slab, object,
1585 "Freechain corrupt");
1586 set_freepointer(s, object, NULL);
1587 break;
1588 } else {
1589 slab_err(s, slab, "Freepointer corrupt");
1590 slab->freelist = NULL;
1591 slab->inuse = slab->objects;
1592 slab_fix(s, "Freelist cleared");
1593 return false;
1594 }
1595 }
1596 object = fp;
1597 fp = get_freepointer(s, object);
1598 nr++;
1599 }
1600
1601 if (nr > slab->objects) {
1602 slab_err(s, slab, "Freelist cycle detected");
1603 slab->freelist = NULL;
1604 slab->inuse = slab->objects;
1605 slab_fix(s, "Freelist cleared");
1606 return false;
1607 }
1608
1609 max_objects = order_objects(slab_order(slab), s->size);
1610 if (max_objects > MAX_OBJS_PER_PAGE)
1611 max_objects = MAX_OBJS_PER_PAGE;
1612
1613 if (slab->objects != max_objects) {
1614 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1615 slab->objects, max_objects);
1616 slab->objects = max_objects;
1617 slab_fix(s, "Number of objects adjusted");
1618 }
1619 if (slab->inuse != slab->objects - nr) {
1620 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
1621 slab->inuse, slab->objects - nr);
1622 slab->inuse = slab->objects - nr;
1623 slab_fix(s, "Object count adjusted");
1624 }
1625 return search == NULL;
1626 }
1627
1628 static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1629 int alloc)
1630 {
1631 if (s->flags & SLAB_TRACE) {
1632 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1633 s->name,
1634 alloc ? "alloc" : "free",
1635 object, slab->inuse,
1636 slab->freelist);
1637
1638 if (!alloc)
1639 print_section(KERN_INFO, "Object ", (void *)object,
1640 s->object_size);
1641
1642 dump_stack();
1643 }
1644 }
1645
1646 /*
1647 * Tracking of fully allocated slabs for debugging purposes.
1648 */
1649 static void add_full(struct kmem_cache *s,
1650 struct kmem_cache_node *n, struct slab *slab)
1651 {
1652 if (!(s->flags & SLAB_STORE_USER))
1653 return;
1654
1655 lockdep_assert_held(&n->list_lock);
1656 list_add(&slab->slab_list, &n->full);
1657 }
1658
1659 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
1660 {
1661 if (!(s->flags & SLAB_STORE_USER))
1662 return;
1663
1664 lockdep_assert_held(&n->list_lock);
1665 list_del(&slab->slab_list);
1666 }
1667
1668 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1669 {
1670 return atomic_long_read(&n->nr_slabs);
1671 }
1672
1673 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1674 {
1675 struct kmem_cache_node *n = get_node(s, node);
1676
1677 atomic_long_inc(&n->nr_slabs);
1678 atomic_long_add(objects, &n->total_objects);
1679 }
1680 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1681 {
1682 struct kmem_cache_node *n = get_node(s, node);
1683
1684 atomic_long_dec(&n->nr_slabs);
1685 atomic_long_sub(objects, &n->total_objects);
1686 }
1687
1688 /* Object debug checks for alloc/free paths */
1689 static void setup_object_debug(struct kmem_cache *s, void *object)
1690 {
1691 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1692 return;
1693
1694 init_object(s, object, SLUB_RED_INACTIVE);
1695 init_tracking(s, object);
1696 }
1697
1698 static
1699 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
1700 {
1701 if (!kmem_cache_debug_flags(s, SLAB_POISON))
1702 return;
1703
1704 metadata_access_enable();
1705 memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
1706 metadata_access_disable();
1707 }
1708
1709 static inline int alloc_consistency_checks(struct kmem_cache *s,
1710 struct slab *slab, void *object)
1711 {
1712 if (!check_slab(s, slab))
1713 return 0;
1714
1715 if (!check_valid_pointer(s, slab, object)) {
1716 object_err(s, slab, object, "Freelist Pointer check fails");
1717 return 0;
1718 }
1719
1720 if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1721 return 0;
1722
1723 return 1;
1724 }
1725
1726 static noinline bool alloc_debug_processing(struct kmem_cache *s,
1727 struct slab *slab, void *object, int orig_size)
1728 {
1729 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1730 if (!alloc_consistency_checks(s, slab, object))
1731 goto bad;
1732 }
1733
1734 /* Success. Perform special debug activities for allocs */
1735 trace(s, slab, object, 1);
1736 set_orig_size(s, object, orig_size);
1737 init_object(s, object, SLUB_RED_ACTIVE);
1738 return true;
1739
1740 bad:
1741 /*
1742 * Let's do the best we can to avoid issues in the future. Marking all
1743 * objects as used avoids touching the remaining objects.
1744 */
1745 slab_fix(s, "Marking all objects used");
1746 slab->inuse = slab->objects;
1747 slab->freelist = NULL;
1748 slab->frozen = 1; /* mark consistency-failed slab as frozen */
1749
1750 return false;
1751 }
1752
1753 static inline int free_consistency_checks(struct kmem_cache *s,
1754 struct slab *slab, void *object, unsigned long addr)
1755 {
1756 if (!check_valid_pointer(s, slab, object)) {
1757 slab_err(s, slab, "Invalid object pointer 0x%p", object);
1758 return 0;
1759 }
1760
1761 if (on_freelist(s, slab, object)) {
1762 object_err(s, slab, object, "Object already free");
1763 return 0;
1764 }
1765
1766 if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1767 return 0;
1768
1769 if (unlikely(s != slab->slab_cache)) {
1770 if (!slab->slab_cache) {
1771 slab_err(NULL, slab, "No slab cache for object 0x%p",
1772 object);
1773 } else {
1774 object_err(s, slab, object,
1775 "page slab pointer corrupt.");
1776 }
1777 return 0;
1778 }
1779 return 1;
1780 }
1781
1782 /*
1783 * Parse a block of slab_debug options. Blocks are delimited by ';'
1784 *
1785 * @str: start of block
1786 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1787 * @slabs: return start of list of slabs, or NULL when there's no list
1788 * @init: assume this is initial parsing and not per-kmem-create parsing
1789 *
1790 * returns the start of next block if there's any, or NULL
1791 */
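/*
 * Illustrative example (hypothetical command line, following the syntax
 * parsed below): "slab_debug=FZP;U,dentry" consists of two blocks
 * separated by ';'. The first block "FZP" has no slab list and enables
 * consistency checks, red zoning and poisoning globally; the second,
 * "U,dentry", enables user tracking only for caches matching "dentry".
 */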
1792 static const char *
1793 parse_slub_debug_flags(const char *str, slab_flags_t *flags, const char **slabs, bool init)
1794 {
1795 bool higher_order_disable = false;
1796
1797 /* Skip any completely empty blocks */
1798 while (*str && *str == ';')
1799 str++;
1800
1801 if (*str == ',') {
1802 /*
1803 * No options but restriction on slabs. This means full
1804 * debugging for slabs matching a pattern.
1805 */
1806 *flags = DEBUG_DEFAULT_FLAGS;
1807 goto check_slabs;
1808 }
1809 *flags = 0;
1810
1811 /* Determine which debug features should be switched on */
1812 for (; *str && *str != ',' && *str != ';'; str++) {
1813 switch (tolower(*str)) {
1814 case '-':
1815 *flags = 0;
1816 break;
1817 case 'f':
1818 *flags |= SLAB_CONSISTENCY_CHECKS;
1819 break;
1820 case 'z':
1821 *flags |= SLAB_RED_ZONE;
1822 break;
1823 case 'p':
1824 *flags |= SLAB_POISON;
1825 break;
1826 case 'u':
1827 *flags |= SLAB_STORE_USER;
1828 break;
1829 case 't':
1830 *flags |= SLAB_TRACE;
1831 break;
1832 case 'a':
1833 *flags |= SLAB_FAILSLAB;
1834 break;
1835 case 'o':
1836 /*
1837 * Avoid enabling debugging on a cache if its minimum
1838 * order would increase as a result.
1839 */
1840 higher_order_disable = true;
1841 break;
1842 default:
1843 if (init)
1844 pr_err("slab_debug option '%c' unknown. skipped\n", *str);
1845 }
1846 }
1847 check_slabs:
1848 if (*str == ',')
1849 *slabs = ++str;
1850 else
1851 *slabs = NULL;
1852
1853 /* Skip over the slab list */
1854 while (*str && *str != ';')
1855 str++;
1856
1857 /* Skip any completely empty blocks */
1858 while (*str && *str == ';')
1859 str++;
1860
1861 if (init && higher_order_disable)
1862 disable_higher_order_debug = 1;
1863
1864 if (*str)
1865 return str;
1866 else
1867 return NULL;
1868 }
1869
1870 static int __init setup_slub_debug(const char *str, const struct kernel_param *kp)
1871 {
1872 slab_flags_t flags;
1873 slab_flags_t global_flags;
1874 const char *saved_str;
1875 const char *slab_list;
1876 bool global_slub_debug_changed = false;
1877 bool slab_list_specified = false;
1878
1879 global_flags = DEBUG_DEFAULT_FLAGS;
1880 if (!str || !*str)
1881 /*
1882 * No options specified. Switch on full debugging.
1883 */
1884 goto out;
1885
1886 saved_str = str;
1887 while (str) {
1888 str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1889
1890 if (!slab_list) {
1891 global_flags = flags;
1892 global_slub_debug_changed = true;
1893 } else {
1894 slab_list_specified = true;
1895 if (flags & SLAB_STORE_USER)
1896 stack_depot_request_early_init();
1897 }
1898 }
1899
1900 /*
1901 * For backwards compatibility, a single list of flags with list of
1902 * slabs means debugging is only changed for those slabs, so the global
1903 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1904 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
1905 * long as there is no option specifying flags without a slab list.
1906 */
1907 if (slab_list_specified) {
1908 if (!global_slub_debug_changed)
1909 global_flags = slub_debug;
1910 slub_debug_string = saved_str;
1911 }
1912 out:
1913 slub_debug = global_flags;
1914 if (slub_debug & SLAB_STORE_USER)
1915 stack_depot_request_early_init();
1916 if (slub_debug != 0 || slub_debug_string)
1917 static_branch_enable(&slub_debug_enabled);
1918 else
1919 static_branch_disable(&slub_debug_enabled);
1920 if ((static_branch_unlikely(&init_on_alloc) ||
1921 static_branch_unlikely(&init_on_free)) &&
1922 (slub_debug & SLAB_POISON))
1923 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1924 return 0;
1925 }
1926
1927 static const struct kernel_param_ops param_ops_slab_debug __initconst = {
1928 .flags = KERNEL_PARAM_OPS_FL_NOARG,
1929 .set = setup_slub_debug,
1930 };
1931 __core_param_cb(slab_debug, &param_ops_slab_debug, NULL, 0);
1932 __core_param_cb(slub_debug, &param_ops_slab_debug, NULL, 0);
1933
1934 /*
1935 * kmem_cache_flags - apply debugging options to the cache
1936 * @flags: flags to set
1937 * @name: name of the cache
1938 *
1939 * Debug option(s) are applied to @flags. In addition to the debug
1940 * option(s), if a slab name (or multiple) is specified i.e.
1941 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1942 * then only the selected slabs will receive the debug option(s).
1943 */
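/*
 * For example (illustrative only), booting with slab_debug=U,kmalloc-*
 * makes the glob match below add SLAB_STORE_USER to every cache whose
 * name starts with "kmalloc-", while other caches keep their flags.
 */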
1944 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1945 {
1946 const char *iter;
1947 size_t len;
1948 const char *next_block;
1949 slab_flags_t block_flags;
1950 slab_flags_t slub_debug_local = slub_debug;
1951
1952 if (flags & SLAB_NO_USER_FLAGS)
1953 return flags;
1954
1955 /*
1956 * If the slab cache is for debugging (e.g. kmemleak) then
1957 * don't store user (stack trace) information by default,
1958 * but let the user enable it via the command line below.
1959 */
1960 if (flags & SLAB_NOLEAKTRACE)
1961 slub_debug_local &= ~SLAB_STORE_USER;
1962
1963 len = strlen(name);
1964 next_block = slub_debug_string;
1965 /* Go through all blocks of debug options, see if any matches our slab's name */
1966 while (next_block) {
1967 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1968 if (!iter)
1969 continue;
1970 /* Found a block that has a slab list, search it */
1971 while (*iter) {
1972 const char *end, *glob;
1973 size_t cmplen;
1974
1975 end = strchrnul(iter, ',');
1976 if (next_block && next_block < end)
1977 end = next_block - 1;
1978
1979 glob = strnchr(iter, end - iter, '*');
1980 if (glob)
1981 cmplen = glob - iter;
1982 else
1983 cmplen = max_t(size_t, len, (end - iter));
1984
1985 if (!strncmp(name, iter, cmplen)) {
1986 flags |= block_flags;
1987 return flags;
1988 }
1989
1990 if (!*end || *end == ';')
1991 break;
1992 iter = end + 1;
1993 }
1994 }
1995
1996 return flags | slub_debug_local;
1997 }
1998 #else /* !CONFIG_SLUB_DEBUG */
1999 static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
2000 static inline
2001 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
2002
2003 static inline bool alloc_debug_processing(struct kmem_cache *s,
2004 struct slab *slab, void *object, int orig_size) { return true; }
2005
2006 static inline bool free_debug_processing(struct kmem_cache *s,
2007 struct slab *slab, void *head, void *tail, int *bulk_cnt,
2008 unsigned long addr, depot_stack_handle_t handle) { return true; }
2009
2010 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
2011 static inline int check_object(struct kmem_cache *s, struct slab *slab,
2012 void *object, u8 val) { return 1; }
2013 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
2014 static inline void set_track(struct kmem_cache *s, void *object,
2015 enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
2016 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
2017 struct slab *slab) {}
2018 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
2019 struct slab *slab) {}
2020 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
2021 {
2022 return flags;
2023 }
2024 #define slub_debug 0
2025
2026 #define disable_higher_order_debug 0
2027
2028 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
2029 { return 0; }
2030 static inline void inc_slabs_node(struct kmem_cache *s, int node,
2031 int objects) {}
2032 static inline void dec_slabs_node(struct kmem_cache *s, int node,
2033 int objects) {}
2034 #endif /* CONFIG_SLUB_DEBUG */
2035
2036 /*
2037 * The allocated objcg pointers array is not accounted directly.
2038 * Moreover, it should not come from a DMA buffer and is not readily
2039 * reclaimable. So those GFP bits should be masked off.
2040 */
2041 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
2042 __GFP_ACCOUNT | __GFP_NOFAIL)
2043
2044 #ifdef CONFIG_SLAB_OBJ_EXT
2045
2046 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
2047
2048 static inline void mark_obj_codetag_empty(const void *obj)
2049 {
2050 struct slab *obj_slab;
2051 unsigned long slab_exts;
2052
2053 obj_slab = virt_to_slab(obj);
2054 slab_exts = slab_obj_exts(obj_slab);
2055 if (slab_exts) {
2056 get_slab_obj_exts(slab_exts);
2057 unsigned int offs = obj_to_index(obj_slab->slab_cache,
2058 obj_slab, obj);
2059 struct slabobj_ext *ext = slab_obj_ext(obj_slab,
2060 slab_exts, offs);
2061
2062 if (unlikely(is_codetag_empty(&ext->ref))) {
2063 put_slab_obj_exts(slab_exts);
2064 return;
2065 }
2066
2067 /* codetag should be NULL here */
2068 WARN_ON(ext->ref.ct);
2069 set_codetag_empty(&ext->ref);
2070 put_slab_obj_exts(slab_exts);
2071 }
2072 }
2073
2074 static inline bool mark_failed_objexts_alloc(struct slab *slab)
2075 {
2076 return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
2077 }
2078
2079 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2080 struct slabobj_ext *vec, unsigned int objects)
2081 {
2082 /*
2083 * If vector previously failed to allocate then we have live
2084 * objects with no tag reference. Mark all references in this
2085 * vector as empty to avoid warnings later on.
2086 */
2087 if (obj_exts == OBJEXTS_ALLOC_FAIL) {
2088 unsigned int i;
2089
2090 for (i = 0; i < objects; i++)
2091 set_codetag_empty(&vec[i].ref);
2092 }
2093 }
2094
2095 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2096
2097 static inline void mark_obj_codetag_empty(const void *obj) {}
2098 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
2099 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2100 struct slabobj_ext *vec, unsigned int objects) {}
2101
2102 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2103
2104 static inline void init_slab_obj_exts(struct slab *slab)
2105 {
2106 slab->obj_exts = 0;
2107 }
2108
2109 /*
2110 * Calculate the allocation size for slabobj_ext array.
2111 *
2112 * When memory allocation profiling is enabled, the obj_exts array
2113 * could be allocated from the same slab cache it's being allocated for.
2114 * This would prevent the slab from ever being freed because it would
2115 * always contain at least one allocated object (its own obj_exts array).
2116 *
2117 * To avoid this, increase the allocation size when we detect the array
2118 * may come from the same cache, forcing it to use a different cache.
2119 */
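/*
 * Worked example (sizes are illustrative): if a cache has 64-byte objects
 * and its obj_exts vector would also round up to the 64-byte kmalloc
 * bucket, the size is bumped to object_size + 1 below, so the vector is
 * served from the next larger bucket and the slab can still become empty
 * and be freed.
 */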
2120 static inline size_t obj_exts_alloc_size(struct kmem_cache *s,
2121 struct slab *slab, gfp_t gfp)
2122 {
2123 size_t sz = sizeof(struct slabobj_ext) * slab->objects;
2124 struct kmem_cache *obj_exts_cache;
2125
2126 if (sz > KMALLOC_MAX_CACHE_SIZE)
2127 return sz;
2128
2129 if (!is_kmalloc_normal(s))
2130 return sz;
2131
2132 obj_exts_cache = kmalloc_slab(sz, NULL, gfp, 0);
2133 /*
2134 * We can't simply compare s with obj_exts_cache, because random kmalloc
2135 * caches have multiple caches per size, selected by caller address.
2136 * Since caller address may differ between kmalloc_slab() and actual
2137 * allocation, bump size when sizes are equal.
2138 */
2139 if (s->object_size == obj_exts_cache->object_size)
2140 return obj_exts_cache->object_size + 1;
2141
2142 return sz;
2143 }
2144
2145 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2146 gfp_t gfp, bool new_slab)
2147 {
2148 bool allow_spin = gfpflags_allow_spinning(gfp);
2149 unsigned int objects = objs_per_slab(s, slab);
2150 unsigned long new_exts;
2151 unsigned long old_exts;
2152 struct slabobj_ext *vec;
2153 size_t sz;
2154
2155 gfp &= ~OBJCGS_CLEAR_MASK;
2156 /* Prevent recursive extension vector allocation */
2157 gfp |= __GFP_NO_OBJ_EXT;
2158
2159 sz = obj_exts_alloc_size(s, slab, gfp);
2160
2161 /*
2162 * Note that allow_spin may be false during early boot due to the
2163 * restricted GFP_BOOT_MASK. Since kmalloc_nolock() only supports
2164 * architectures with cmpxchg16b, early obj_exts will be missing for
2165 * very early allocations on those.
2166 */
2167 if (unlikely(!allow_spin))
2168 vec = kmalloc_nolock(sz, __GFP_ZERO | __GFP_NO_OBJ_EXT,
2169 slab_nid(slab));
2170 else
2171 vec = kmalloc_node(sz, gfp | __GFP_ZERO, slab_nid(slab));
2172
2173 if (!vec) {
2174 /*
2175 * Try to mark vectors which failed to allocate.
2176 * If this operation fails, there may be a racing process
2177 * that has already completed the allocation.
2178 */
2179 if (!mark_failed_objexts_alloc(slab) &&
2180 slab_obj_exts(slab))
2181 return 0;
2182
2183 return -ENOMEM;
2184 }
2185
2186 VM_WARN_ON_ONCE(virt_to_slab(vec) != NULL &&
2187 virt_to_slab(vec)->slab_cache == s);
2188
2189 new_exts = (unsigned long)vec;
2190 #ifdef CONFIG_MEMCG
2191 new_exts |= MEMCG_DATA_OBJEXTS;
2192 #endif
2193 retry:
2194 old_exts = READ_ONCE(slab->obj_exts);
2195 handle_failed_objexts_alloc(old_exts, vec, objects);
2196
2197 if (new_slab) {
2198 /*
2199 * If the slab is brand new and nobody can yet access its
2200 * obj_exts, no synchronization is required and obj_exts can
2201 * be simply assigned.
2202 */
2203 slab->obj_exts = new_exts;
2204 } else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
2205 /*
2206 * If the slab is already in use, somebody can allocate and
2207 * assign slabobj_exts in parallel. In this case the existing
2208 * objcg vector should be reused.
2209 */
2210 mark_obj_codetag_empty(vec);
2211 if (unlikely(!allow_spin))
2212 kfree_nolock(vec);
2213 else
2214 kfree(vec);
2215 return 0;
2216 } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
2217 /* Retry if a racing thread changed slab->obj_exts from under us. */
2218 goto retry;
2219 }
2220
2221 if (allow_spin)
2222 kmemleak_not_leak(vec);
2223 return 0;
2224 }
2225
2226 static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
2227 {
2228 struct slabobj_ext *obj_exts;
2229
2230 obj_exts = (struct slabobj_ext *)slab_obj_exts(slab);
2231 if (!obj_exts) {
2232 /*
2233 * If obj_exts allocation failed, slab->obj_exts is set to
2234 * OBJEXTS_ALLOC_FAIL. In this case, we end up here and should
2235 * clear the flag.
2236 */
2237 slab->obj_exts = 0;
2238 return;
2239 }
2240
2241 if (obj_exts_in_slab(slab->slab_cache, slab)) {
2242 slab->obj_exts = 0;
2243 return;
2244 }
2245
2246 /*
2247 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its
2248 * corresponding extension will be NULL. alloc_tag_sub() will throw a
2249 * warning if slab has extensions but the extension of an object is
2250 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
2251 * the extension for obj_exts is expected to be NULL.
2252 */
2253 mark_obj_codetag_empty(obj_exts);
2254 if (allow_spin)
2255 kfree(obj_exts);
2256 else
2257 kfree_nolock(obj_exts);
2258 slab->obj_exts = 0;
2259 }
2260
2261 /*
2262 * Try to allocate slabobj_ext array from unused space.
2263 * This function must be called on a freshly allocated slab to prevent
2264 * concurrency problems.
2265 */
2266 static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
2267 {
2268 void *addr;
2269 unsigned long obj_exts;
2270
2271 /* Initialize stride early to avoid memory ordering issues */
2272 slab_set_stride(slab, sizeof(struct slabobj_ext));
2273
2274 if (!need_slab_obj_exts(s))
2275 return;
2276
2277 if (obj_exts_fit_within_slab_leftover(s, slab)) {
2278 addr = slab_address(slab) + obj_exts_offset_in_slab(s, slab);
2279 addr = kasan_reset_tag(addr);
2280 obj_exts = (unsigned long)addr;
2281
2282 get_slab_obj_exts(obj_exts);
2283 memset(addr, 0, obj_exts_size_in_slab(slab));
2284 put_slab_obj_exts(obj_exts);
2285
2286 #ifdef CONFIG_MEMCG
2287 obj_exts |= MEMCG_DATA_OBJEXTS;
2288 #endif
2289 slab->obj_exts = obj_exts;
2290 } else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
2291 unsigned int offset = obj_exts_offset_in_object(s);
2292
2293 obj_exts = (unsigned long)slab_address(slab);
2294 obj_exts += s->red_left_pad;
2295 obj_exts += offset;
2296
2297 get_slab_obj_exts(obj_exts);
2298 for_each_object(addr, s, slab_address(slab), slab->objects)
2299 memset(kasan_reset_tag(addr) + offset, 0,
2300 sizeof(struct slabobj_ext));
2301 put_slab_obj_exts(obj_exts);
2302
2303 #ifdef CONFIG_MEMCG
2304 obj_exts |= MEMCG_DATA_OBJEXTS;
2305 #endif
2306 slab->obj_exts = obj_exts;
2307 slab_set_stride(slab, s->size);
2308 }
2309 }
2310
2311 #else /* CONFIG_SLAB_OBJ_EXT */
2312
2313 static inline void mark_obj_codetag_empty(const void *obj)
2314 {
2315 }
2316
2317 static inline void init_slab_obj_exts(struct slab *slab)
2318 {
2319 }
2320
2321 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2322 gfp_t gfp, bool new_slab)
2323 {
2324 return 0;
2325 }
2326
2327 static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
2328 {
2329 }
2330
2331 static inline void alloc_slab_obj_exts_early(struct kmem_cache *s,
2332 struct slab *slab)
2333 {
2334 }
2335
2336 #endif /* CONFIG_SLAB_OBJ_EXT */
2337
2338 #ifdef CONFIG_MEM_ALLOC_PROFILING
2339
2340 static inline unsigned long
2341 prepare_slab_obj_exts_hook(struct kmem_cache *s, struct slab *slab,
2342 gfp_t flags, void *p)
2343 {
2344 if (!slab_obj_exts(slab) &&
2345 alloc_slab_obj_exts(slab, s, flags, false)) {
2346 pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
2347 __func__, s->name);
2348 return 0;
2349 }
2350
2351 return slab_obj_exts(slab);
2352 }
2353
2354
2355 /* Should be called only if mem_alloc_profiling_enabled() */
2356 static noinline void
2357 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2358 {
2359 unsigned long obj_exts;
2360 struct slabobj_ext *obj_ext;
2361 struct slab *slab;
2362
2363 if (!object)
2364 return;
2365
2366 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2367 return;
2368
2369 if (flags & __GFP_NO_OBJ_EXT)
2370 return;
2371
2372 slab = virt_to_slab(object);
2373 obj_exts = prepare_slab_obj_exts_hook(s, slab, flags, object);
2374 /*
2375 * Currently obj_exts is used only for allocation profiling.
2376 * If other users appear then mem_alloc_profiling_enabled()
2377 * check should be added before alloc_tag_add().
2378 */
2379 if (obj_exts) {
2380 unsigned int obj_idx = obj_to_index(s, slab, object);
2381
2382 get_slab_obj_exts(obj_exts);
2383 obj_ext = slab_obj_ext(slab, obj_exts, obj_idx);
2384 alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
2385 put_slab_obj_exts(obj_exts);
2386 } else {
2387 alloc_tag_set_inaccurate(current->alloc_tag);
2388 }
2389 }
2390
2391 static inline void
2392 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2393 {
2394 if (mem_alloc_profiling_enabled())
2395 __alloc_tagging_slab_alloc_hook(s, object, flags);
2396 }
2397
2398 /* Should be called only if mem_alloc_profiling_enabled() */
2399 static noinline void
2400 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2401 int objects)
2402 {
2403 int i;
2404 unsigned long obj_exts;
2405
2406 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
2407 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2408 return;
2409
2410 obj_exts = slab_obj_exts(slab);
2411 if (!obj_exts)
2412 return;
2413
2414 get_slab_obj_exts(obj_exts);
2415 for (i = 0; i < objects; i++) {
2416 unsigned int off = obj_to_index(s, slab, p[i]);
2417
2418 alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
2419 }
2420 put_slab_obj_exts(obj_exts);
2421 }
2422
2423 static inline void
2424 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2425 int objects)
2426 {
2427 if (mem_alloc_profiling_enabled())
2428 __alloc_tagging_slab_free_hook(s, slab, p, objects);
2429 }
2430
2431 #else /* CONFIG_MEM_ALLOC_PROFILING */
2432
2433 static inline void
2434 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2435 {
2436 }
2437
2438 static inline void
2439 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2440 int objects)
2441 {
2442 }
2443
2444 #endif /* CONFIG_MEM_ALLOC_PROFILING */
2445
2446
2447 #ifdef CONFIG_MEMCG
2448
2449 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
2450
2451 static __fastpath_inline
2452 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2453 gfp_t flags, size_t size, void **p)
2454 {
2455 if (likely(!memcg_kmem_online()))
2456 return true;
2457
2458 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
2459 return true;
2460
2461 if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
2462 return true;
2463
2464 if (likely(size == 1)) {
2465 memcg_alloc_abort_single(s, *p);
2466 *p = NULL;
2467 } else {
2468 kmem_cache_free_bulk(s, size, p);
2469 }
2470
2471 return false;
2472 }
2473
2474 static __fastpath_inline
2475 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2476 int objects)
2477 {
2478 unsigned long obj_exts;
2479
2480 if (!memcg_kmem_online())
2481 return;
2482
2483 obj_exts = slab_obj_exts(slab);
2484 if (likely(!obj_exts))
2485 return;
2486
2487 get_slab_obj_exts(obj_exts);
2488 __memcg_slab_free_hook(s, slab, p, objects, obj_exts);
2489 put_slab_obj_exts(obj_exts);
2490 }
2491
2492 static __fastpath_inline
2493 bool memcg_slab_post_charge(void *p, gfp_t flags)
2494 {
2495 unsigned long obj_exts;
2496 struct slabobj_ext *obj_ext;
2497 struct kmem_cache *s;
2498 struct page *page;
2499 struct slab *slab;
2500 unsigned long off;
2501
2502 page = virt_to_page(p);
2503 if (PageLargeKmalloc(page)) {
2504 unsigned int order;
2505 int size;
2506
2507 if (PageMemcgKmem(page))
2508 return true;
2509
2510 order = large_kmalloc_order(page);
2511 if (__memcg_kmem_charge_page(page, flags, order))
2512 return false;
2513
2514 /*
2515 * This page has already been accounted in the global stats but
2516 * not in the memcg stats. So, subtract from the global and use
2517 * the interface which adds to both global and memcg stats.
2518 */
2519 size = PAGE_SIZE << order;
2520 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, -size);
2521 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, size);
2522 return true;
2523 }
2524
2525 slab = page_slab(page);
2526 s = slab->slab_cache;
2527
2528 /*
2529 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency
2530 * of slab_obj_exts being allocated from the same slab and thus the slab
2531 * becoming effectively unfreeable.
2532 */
2533 if (is_kmalloc_normal(s))
2534 return true;
2535
2536 /* Ignore already charged objects. */
2537 obj_exts = slab_obj_exts(slab);
2538 if (obj_exts) {
2539 get_slab_obj_exts(obj_exts);
2540 off = obj_to_index(s, slab, p);
2541 obj_ext = slab_obj_ext(slab, obj_exts, off);
2542 if (unlikely(obj_ext->objcg)) {
2543 put_slab_obj_exts(obj_exts);
2544 return true;
2545 }
2546 put_slab_obj_exts(obj_exts);
2547 }
2548
2549 return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
2550 }
2551
2552 #else /* CONFIG_MEMCG */
2553 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
2554 struct list_lru *lru,
2555 gfp_t flags, size_t size,
2556 void **p)
2557 {
2558 return true;
2559 }
2560
2561 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2562 void **p, int objects)
2563 {
2564 }
2565
2566 static inline bool memcg_slab_post_charge(void *p, gfp_t flags)
2567 {
2568 return true;
2569 }
2570 #endif /* CONFIG_MEMCG */
2571
2572 #ifdef CONFIG_SLUB_RCU_DEBUG
2573 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
2574
2575 struct rcu_delayed_free {
2576 struct rcu_head head;
2577 void *object;
2578 };
2579 #endif
2580
2581 /*
2582 * Hooks for other subsystems that check memory allocations. In a typical
2583 * production configuration these hooks all should produce no code at all.
2584 *
2585 * Returns true if freeing of the object can proceed, false if its reuse
2586 * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
2587 * to KFENCE.
2588 *
2589 * For objects allocated via kmalloc_nolock(), only a subset of alloc hooks
2590 * are invoked, so some free hooks must handle asymmetric hook calls.
2591 *
2592 * Alloc hooks called for kmalloc_nolock():
2593 * - kmsan_slab_alloc()
2594 * - kasan_slab_alloc()
2595 * - memcg_slab_post_alloc_hook()
2596 * - alloc_tagging_slab_alloc_hook()
2597 *
2598 * Free hooks that must handle missing corresponding alloc hooks:
2599 * - kmemleak_free_recursive()
2600 * - kfence_free()
2601 *
2602 * Free hooks that have no alloc hook counterpart, and thus safe to call:
2603 * - debug_check_no_locks_freed()
2604 * - debug_check_no_obj_freed()
2605 * - __kcsan_check_access()
2606 */
2607 static __always_inline
2608 bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
2609 bool after_rcu_delay)
2610 {
2611 /* Are the object contents still accessible? */
2612 bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
2613
2614 kmemleak_free_recursive(x, s->flags);
2615 kmsan_slab_free(s, x);
2616
2617 debug_check_no_locks_freed(x, s->object_size);
2618
2619 if (!(s->flags & SLAB_DEBUG_OBJECTS))
2620 debug_check_no_obj_freed(x, s->object_size);
2621
2622 /* Use KCSAN to help debug racy use-after-free. */
2623 if (!still_accessible)
2624 __kcsan_check_access(x, s->object_size,
2625 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
2626
2627 if (kfence_free(x))
2628 return false;
2629
2630 /*
2631 * Give KASAN a chance to notice an invalid free operation before we
2632 * modify the object.
2633 */
2634 if (kasan_slab_pre_free(s, x))
2635 return false;
2636
2637 #ifdef CONFIG_SLUB_RCU_DEBUG
2638 if (still_accessible) {
2639 struct rcu_delayed_free *delayed_free;
2640
2641 delayed_free = kmalloc_obj(*delayed_free, GFP_NOWAIT);
2642 if (delayed_free) {
2643 /*
2644 * Let KASAN track our call stack as a "related work
2645 * creation", just like if the object had been freed
2646 * normally via kfree_rcu().
2647 * We have to do this manually because the rcu_head is
2648 * not located inside the object.
2649 */
2650 kasan_record_aux_stack(x);
2651
2652 delayed_free->object = x;
2653 call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
2654 return false;
2655 }
2656 }
2657 #endif /* CONFIG_SLUB_RCU_DEBUG */
2658
2659 /*
2660 * As memory initialization might be integrated into KASAN,
2661 * kasan_slab_free and initialization memset's must be
2662 * kept together to avoid discrepancies in behavior.
2663 *
2664 * The initialization memset's clear the object and the metadata,
2665 * but don't touch the SLAB redzone.
2666 *
2667 * The object's freepointer is also avoided if stored outside the
2668 * object.
2669 */
2670 if (unlikely(init)) {
2671 int rsize;
2672 unsigned int inuse, orig_size;
2673
2674 inuse = get_info_end(s);
2675 orig_size = get_orig_size(s, x);
2676 if (!kasan_has_integrated_init())
2677 memset(kasan_reset_tag(x), 0, orig_size);
2678 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2679 memset((char *)kasan_reset_tag(x) + inuse, 0,
2680 s->size - inuse - rsize);
2681 /*
2682 * Restore orig_size, otherwise kmalloc redzone overwritten
2683 * would be reported
2684 */
2685 set_orig_size(s, x, orig_size);
2686
2687 }
2688 /* KASAN might put x into memory quarantine, delaying its reuse. */
2689 return !kasan_slab_free(s, x, init, still_accessible, false);
2690 }
2691
2692 static __fastpath_inline
2693 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
2694 int *cnt)
2695 {
2696
2697 void *object;
2698 void *next = *head;
2699 void *old_tail = *tail;
2700 bool init;
2701
2702 if (is_kfence_address(next)) {
2703 slab_free_hook(s, next, false, false);
2704 return false;
2705 }
2706
2707 /* Head and tail of the reconstructed freelist */
2708 *head = NULL;
2709 *tail = NULL;
2710
2711 init = slab_want_init_on_free(s);
2712
2713 do {
2714 object = next;
2715 next = get_freepointer(s, object);
2716
2717 /* If object's reuse doesn't have to be delayed */
2718 if (likely(slab_free_hook(s, object, init, false))) {
2719 /* Move object to the new freelist */
2720 set_freepointer(s, object, *head);
2721 *head = object;
2722 if (!*tail)
2723 *tail = object;
2724 } else {
2725 /*
2726 * Adjust the reconstructed freelist depth
2727 * accordingly if object's reuse is delayed.
2728 */
2729 --(*cnt);
2730 }
2731 } while (object != old_tail);
2732
2733 return *head != NULL;
2734 }
2735
2736 static void *setup_object(struct kmem_cache *s, void *object)
2737 {
2738 setup_object_debug(s, object);
2739 object = kasan_init_slab_obj(s, object);
2740 if (unlikely(s->ctor)) {
2741 kasan_unpoison_new_object(s, object);
2742 s->ctor(object);
2743 kasan_poison_new_object(s, object);
2744 }
2745 return object;
2746 }
2747
2748 static struct slab_sheaf *__alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp,
2749 unsigned int capacity)
2750 {
2751 struct slab_sheaf *sheaf;
2752 size_t sheaf_size;
2753
2754 if (gfp & __GFP_NO_OBJ_EXT)
2755 return NULL;
2756
2757 gfp &= ~OBJCGS_CLEAR_MASK;
2758
2759 /*
2760 * Prevent recursion to the same cache, or a deep stack of kmallocs of
2761 * varying sizes (sheaf capacity might differ for each kmalloc size
2762 * bucket)
2763 */
2764 if (s->flags & SLAB_KMALLOC)
2765 gfp |= __GFP_NO_OBJ_EXT;
2766
2767 sheaf_size = struct_size(sheaf, objects, capacity);
2768 sheaf = kzalloc(sheaf_size, gfp);
2769
2770 if (unlikely(!sheaf))
2771 return NULL;
2772
2773 sheaf->cache = s;
2774
2775 stat(s, SHEAF_ALLOC);
2776
2777 return sheaf;
2778 }
2779
2780 static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
2781 gfp_t gfp)
2782 {
2783 return __alloc_empty_sheaf(s, gfp, s->sheaf_capacity);
2784 }
2785
2786 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
2787 {
2788 /*
2789 * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
2790 * corresponding extension is NULL and alloc_tag_sub() will throw a
2791 * warning, therefore replace NULL with CODETAG_EMPTY to indicate
2792 * that the extension for this sheaf is expected to be NULL.
2793 */
2794 if (s->flags & SLAB_KMALLOC)
2795 mark_obj_codetag_empty(sheaf);
2796
2797 VM_WARN_ON_ONCE(sheaf->size > 0);
2798 kfree(sheaf);
2799
2800 stat(s, SHEAF_FREE);
2801 }
2802
2803 static unsigned int
2804 refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
2805 unsigned int max);
2806
2807 static int refill_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf,
2808 gfp_t gfp)
2809 {
2810 int to_fill = s->sheaf_capacity - sheaf->size;
2811 int filled;
2812
2813 if (!to_fill)
2814 return 0;
2815
2816 filled = refill_objects(s, &sheaf->objects[sheaf->size], gfp, to_fill,
2817 to_fill);
2818
2819 sheaf->size += filled;
2820
2821 stat_add(s, SHEAF_REFILL, filled);
2822
2823 if (filled < to_fill)
2824 return -ENOMEM;
2825
2826 return 0;
2827 }
2828
2829 /*
2830 * Maximum number of objects freed during a single flush of main pcs sheaf.
2831 * Translates directly to an on-stack array size.
2832 */
2833 #define PCS_BATCH_MAX 32U
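/*
 * With the batch size above, the on-stack array in __sheaf_flush_main_batch()
 * takes PCS_BATCH_MAX * sizeof(void *) bytes, i.e. 256 bytes on 64-bit.
 */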
2834
2835 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
2836
2837 /*
2838 * Free all objects from the main sheaf. In order to perform
2839 * __kmem_cache_free_bulk() outside of cpu_sheaves->lock, work in batches where
2840 * object pointers are moved to an on-stack array under the lock. To bound the
2841 * stack usage, limit each batch to PCS_BATCH_MAX.
2842 *
2843 * Must be called with s->cpu_sheaves->lock locked, returns with the lock
2844 * unlocked.
2845 *
2846 * Returns how many objects are remaining to be flushed
2847 */
2848 static unsigned int __sheaf_flush_main_batch(struct kmem_cache *s)
2849 {
2850 struct slub_percpu_sheaves *pcs;
2851 unsigned int batch, remaining;
2852 void *objects[PCS_BATCH_MAX];
2853 struct slab_sheaf *sheaf;
2854
2855 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
2856
2857 pcs = this_cpu_ptr(s->cpu_sheaves);
2858 sheaf = pcs->main;
2859
2860 batch = min(PCS_BATCH_MAX, sheaf->size);
2861
2862 sheaf->size -= batch;
2863 memcpy(objects, sheaf->objects + sheaf->size, batch * sizeof(void *));
2864
2865 remaining = sheaf->size;
2866
2867 local_unlock(&s->cpu_sheaves->lock);
2868
2869 __kmem_cache_free_bulk(s, batch, &objects[0]);
2870
2871 stat_add(s, SHEAF_FLUSH, batch);
2872
2873 return remaining;
2874 }
2875
2876 static void sheaf_flush_main(struct kmem_cache *s)
2877 {
2878 unsigned int remaining;
2879
2880 do {
2881 local_lock(&s->cpu_sheaves->lock);
2882
2883 remaining = __sheaf_flush_main_batch(s);
2884
2885 } while (remaining);
2886 }
2887
2888 /*
2889 * Returns true if the main sheaf was at least partially flushed.
2890 */
2891 static bool sheaf_try_flush_main(struct kmem_cache *s)
2892 {
2893 unsigned int remaining;
2894 bool ret = false;
2895
2896 do {
2897 if (!local_trylock(&s->cpu_sheaves->lock))
2898 return ret;
2899
2900 ret = true;
2901 remaining = __sheaf_flush_main_batch(s);
2902
2903 } while (remaining);
2904
2905 return ret;
2906 }
2907
2908 /*
2909 * Free all objects from a sheaf that's unused, i.e. not linked to any
2910 * cpu_sheaves, so we need no locking or batching. The locking is also not
2911 * necessary when flushing cpu's sheaves (both spare and main) during cpu
2912 * hotremove as the cpu is not executing anymore.
2913 */
2914 static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf)
2915 {
2916 if (!sheaf->size)
2917 return;
2918
2919 stat_add(s, SHEAF_FLUSH, sheaf->size);
2920
2921 __kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
2922
2923 sheaf->size = 0;
2924 }
2925
2926 static bool __rcu_free_sheaf_prepare(struct kmem_cache *s,
2927 struct slab_sheaf *sheaf)
2928 {
2929 bool init = slab_want_init_on_free(s);
2930 void **p = &sheaf->objects[0];
2931 unsigned int i = 0;
2932 bool pfmemalloc = false;
2933
2934 while (i < sheaf->size) {
2935 struct slab *slab = virt_to_slab(p[i]);
2936
2937 memcg_slab_free_hook(s, slab, p + i, 1);
2938 alloc_tagging_slab_free_hook(s, slab, p + i, 1);
2939
2940 if (unlikely(!slab_free_hook(s, p[i], init, true))) {
2941 p[i] = p[--sheaf->size];
2942 continue;
2943 }
2944
2945 if (slab_test_pfmemalloc(slab))
2946 pfmemalloc = true;
2947
2948 i++;
2949 }
2950
2951 return pfmemalloc;
2952 }
2953
2954 static void rcu_free_sheaf_nobarn(struct rcu_head *head)
2955 {
2956 struct slab_sheaf *sheaf;
2957 struct kmem_cache *s;
2958
2959 sheaf = container_of(head, struct slab_sheaf, rcu_head);
2960 s = sheaf->cache;
2961
2962 __rcu_free_sheaf_prepare(s, sheaf);
2963
2964 sheaf_flush_unused(s, sheaf);
2965
2966 free_empty_sheaf(s, sheaf);
2967 }
2968
2969 /*
2970 * Caller needs to make sure migration is disabled in order to fully flush
2971 * a single cpu's sheaves
2972 *
2973 * must not be called from an irq
2974 *
2975 * flushing operations are rare so let's keep it simple and flush to slabs
2976 * directly, skipping the barn
2977 */
2978 static void pcs_flush_all(struct kmem_cache *s)
2979 {
2980 struct slub_percpu_sheaves *pcs;
2981 struct slab_sheaf *spare, *rcu_free;
2982
2983 local_lock(&s->cpu_sheaves->lock);
2984 pcs = this_cpu_ptr(s->cpu_sheaves);
2985
2986 spare = pcs->spare;
2987 pcs->spare = NULL;
2988
2989 rcu_free = pcs->rcu_free;
2990 pcs->rcu_free = NULL;
2991
2992 local_unlock(&s->cpu_sheaves->lock);
2993
2994 if (spare) {
2995 sheaf_flush_unused(s, spare);
2996 free_empty_sheaf(s, spare);
2997 }
2998
2999 if (rcu_free)
3000 call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
3001
3002 sheaf_flush_main(s);
3003 }
3004
3005 static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu)
3006 {
3007 struct slub_percpu_sheaves *pcs;
3008
3009 pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3010
3011 /* The cpu is not executing anymore so we don't need pcs->lock */
3012 sheaf_flush_unused(s, pcs->main);
3013 if (pcs->spare) {
3014 sheaf_flush_unused(s, pcs->spare);
3015 free_empty_sheaf(s, pcs->spare);
3016 pcs->spare = NULL;
3017 }
3018
3019 if (pcs->rcu_free) {
3020 call_rcu(&pcs->rcu_free->rcu_head, rcu_free_sheaf_nobarn);
3021 pcs->rcu_free = NULL;
3022 }
3023 }
3024
3025 static void pcs_destroy(struct kmem_cache *s)
3026 {
3027 int cpu;
3028
3029 /*
3030 * We may be unwinding cache creation that failed before or during the
3031 * allocation of s->cpu_sheaves.
3032 */
3033 if (!s->cpu_sheaves)
3034 return;
3035
3036 /* pcs->main can only point to the bootstrap sheaf, nothing to free */
3037 if (!cache_has_sheaves(s))
3038 goto free_pcs;
3039
3040 for_each_possible_cpu(cpu) {
3041 struct slub_percpu_sheaves *pcs;
3042
3043 pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3044
3045 /* This can happen when unwinding failed cache creation. */
3046 if (!pcs->main)
3047 continue;
3048
3049 /*
3050 * We have already passed __kmem_cache_shutdown() so everything
3051 * was flushed and there should be no objects allocated from
3052 * slabs, otherwise kmem_cache_destroy() would have aborted.
3053 * Therefore something would have to be really wrong if the
3054 * warnings here trigger, and we should rather leave objects and
3055 * sheaves to leak in that case.
3056 */
3057
3058 WARN_ON(pcs->spare);
3059 WARN_ON(pcs->rcu_free);
3060
3061 if (!WARN_ON(pcs->main->size)) {
3062 free_empty_sheaf(s, pcs->main);
3063 pcs->main = NULL;
3064 }
3065 }
3066
3067 free_pcs:
3068 free_percpu(s->cpu_sheaves);
3069 s->cpu_sheaves = NULL;
3070 }
3071
3072 static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn,
3073 bool allow_spin)
3074 {
3075 struct slab_sheaf *empty = NULL;
3076 unsigned long flags;
3077
3078 if (!data_race(barn->nr_empty))
3079 return NULL;
3080
3081 if (likely(allow_spin))
3082 spin_lock_irqsave(&barn->lock, flags);
3083 else if (!spin_trylock_irqsave(&barn->lock, flags))
3084 return NULL;
3085
3086 if (likely(barn->nr_empty)) {
3087 empty = list_first_entry(&barn->sheaves_empty,
3088 struct slab_sheaf, barn_list);
3089 list_del(&empty->barn_list);
3090 barn->nr_empty--;
3091 }
3092
3093 spin_unlock_irqrestore(&barn->lock, flags);
3094
3095 return empty;
3096 }
3097
3098 /*
3099 * The following two functions are used mainly in cases where we have to undo an
3100 * intended action due to a race or cpu migration. Thus they do not check the
3101 * empty or full sheaf limits for simplicity.
3102 */
3103
3104 static void barn_put_empty_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
3105 {
3106 unsigned long flags;
3107
3108 spin_lock_irqsave(&barn->lock, flags);
3109
3110 list_add(&sheaf->barn_list, &barn->sheaves_empty);
3111 barn->nr_empty++;
3112
3113 spin_unlock_irqrestore(&barn->lock, flags);
3114 }
3115
3116 static void barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
3117 {
3118 unsigned long flags;
3119
3120 spin_lock_irqsave(&barn->lock, flags);
3121
3122 list_add(&sheaf->barn_list, &barn->sheaves_full);
3123 barn->nr_full++;
3124
3125 spin_unlock_irqrestore(&barn->lock, flags);
3126 }
3127
3128 static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
3129 {
3130 struct slab_sheaf *sheaf = NULL;
3131 unsigned long flags;
3132
3133 if (!data_race(barn->nr_full) && !data_race(barn->nr_empty))
3134 return NULL;
3135
3136 spin_lock_irqsave(&barn->lock, flags);
3137
3138 if (barn->nr_full) {
3139 sheaf = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
3140 barn_list);
3141 list_del(&sheaf->barn_list);
3142 barn->nr_full--;
3143 } else if (barn->nr_empty) {
3144 sheaf = list_first_entry(&barn->sheaves_empty,
3145 struct slab_sheaf, barn_list);
3146 list_del(&sheaf->barn_list);
3147 barn->nr_empty--;
3148 }
3149
3150 spin_unlock_irqrestore(&barn->lock, flags);
3151
3152 return sheaf;
3153 }
3154
3155 /*
3156 * If a full sheaf is available, return it and put the supplied empty one to
3157 * barn. We ignore the limit on empty sheaves as the number of sheaves doesn't
3158 * change.
3159 */
3160 static struct slab_sheaf *
3161 barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty,
3162 bool allow_spin)
3163 {
3164 struct slab_sheaf *full = NULL;
3165 unsigned long flags;
3166
3167 if (!data_race(barn->nr_full))
3168 return NULL;
3169
3170 if (likely(allow_spin))
3171 spin_lock_irqsave(&barn->lock, flags);
3172 else if (!spin_trylock_irqsave(&barn->lock, flags))
3173 return NULL;
3174
3175 if (likely(barn->nr_full)) {
3176 full = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
3177 barn_list);
3178 list_del(&full->barn_list);
3179 list_add(&empty->barn_list, &barn->sheaves_empty);
3180 barn->nr_full--;
3181 barn->nr_empty++;
3182 }
3183
3184 spin_unlock_irqrestore(&barn->lock, flags);
3185
3186 return full;
3187 }
3188
3189 /*
3190 * If an empty sheaf is available, return it and put the supplied full one to
3191 * barn. But if there are too many full sheaves, reject this with -E2BIG.
3192 */
3193 static struct slab_sheaf *
3194 barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full,
3195 bool allow_spin)
3196 {
3197 struct slab_sheaf *empty;
3198 unsigned long flags;
3199
3200 /* we don't repeat this check under barn->lock as it's not critical */
3201 if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES)
3202 return ERR_PTR(-E2BIG);
3203 if (!data_race(barn->nr_empty))
3204 return ERR_PTR(-ENOMEM);
3205
3206 if (likely(allow_spin))
3207 spin_lock_irqsave(&barn->lock, flags);
3208 else if (!spin_trylock_irqsave(&barn->lock, flags))
3209 return ERR_PTR(-EBUSY);
3210
3211 if (likely(barn->nr_empty)) {
3212 empty = list_first_entry(&barn->sheaves_empty, struct slab_sheaf,
3213 barn_list);
3214 list_del(&empty->barn_list);
3215 list_add(&full->barn_list, &barn->sheaves_full);
3216 barn->nr_empty--;
3217 barn->nr_full++;
3218 } else {
3219 empty = ERR_PTR(-ENOMEM);
3220 }
3221
3222 spin_unlock_irqrestore(&barn->lock, flags);
3223
3224 return empty;
3225 }
3226
3227 static void barn_init(struct node_barn *barn)
3228 {
3229 spin_lock_init(&barn->lock);
3230 INIT_LIST_HEAD(&barn->sheaves_full);
3231 INIT_LIST_HEAD(&barn->sheaves_empty);
3232 barn->nr_full = 0;
3233 barn->nr_empty = 0;
3234 }
3235
3236 static void barn_shrink(struct kmem_cache *s, struct node_barn *barn)
3237 {
3238 LIST_HEAD(empty_list);
3239 LIST_HEAD(full_list);
3240 struct slab_sheaf *sheaf, *sheaf2;
3241 unsigned long flags;
3242
3243 spin_lock_irqsave(&barn->lock, flags);
3244
3245 list_splice_init(&barn->sheaves_full, &full_list);
3246 barn->nr_full = 0;
3247 list_splice_init(&barn->sheaves_empty, &empty_list);
3248 barn->nr_empty = 0;
3249
3250 spin_unlock_irqrestore(&barn->lock, flags);
3251
3252 list_for_each_entry_safe(sheaf, sheaf2, &full_list, barn_list) {
3253 sheaf_flush_unused(s, sheaf);
3254 free_empty_sheaf(s, sheaf);
3255 }
3256
3257 list_for_each_entry_safe(sheaf, sheaf2, &empty_list, barn_list)
3258 free_empty_sheaf(s, sheaf);
3259 }
3260
3261 /*
3262 * Slab allocation and freeing
3263 */
3264 static inline struct slab *alloc_slab_page(gfp_t flags, int node,
3265 struct kmem_cache_order_objects oo,
3266 bool allow_spin)
3267 {
3268 struct page *page;
3269 struct slab *slab;
3270 unsigned int order = oo_order(oo);
3271
3272 if (unlikely(!allow_spin))
3273 page = alloc_frozen_pages_nolock(0/* __GFP_COMP is implied */,
3274 node, order);
3275 else if (node == NUMA_NO_NODE)
3276 page = alloc_frozen_pages(flags, order);
3277 else
3278 page = __alloc_frozen_pages(flags, order, node, NULL);
3279
3280 if (!page)
3281 return NULL;
3282
3283 __SetPageSlab(page);
3284 slab = page_slab(page);
3285 if (page_is_pfmemalloc(page))
3286 slab_set_pfmemalloc(slab);
3287
3288 return slab;
3289 }
3290
3291 #ifdef CONFIG_SLAB_FREELIST_RANDOM
3292 /* Pre-initialize the random sequence cache */
3293 static int init_cache_random_seq(struct kmem_cache *s)
3294 {
3295 unsigned int count = oo_objects(s->oo);
3296 int err;
3297
3298 /* Bailout if already initialised */
3299 if (s->random_seq)
3300 return 0;
3301
3302 err = cache_random_seq_create(s, count, GFP_KERNEL);
3303 if (err) {
3304 pr_err("SLUB: Unable to initialize free list for %s\n",
3305 s->name);
3306 return err;
3307 }
3308
3309 /* Transform to an offset on the set of pages */
3310 if (s->random_seq) {
3311 unsigned int i;
3312
3313 for (i = 0; i < count; i++)
3314 s->random_seq[i] *= s->size;
3315 }
3316 return 0;
3317 }
3318
3319 /* Initialize each random sequence freelist per cache */
3320 static void __init init_freelist_randomization(void)
3321 {
3322 struct kmem_cache *s;
3323
3324 mutex_lock(&slab_mutex);
3325
3326 list_for_each_entry(s, &slab_caches, list)
3327 init_cache_random_seq(s);
3328
3329 mutex_unlock(&slab_mutex);
3330 }
3331
3332 /* Get the next entry on the pre-computed freelist randomized */
3333 static void *next_freelist_entry(struct kmem_cache *s,
3334 unsigned long *pos, void *start,
3335 unsigned long page_limit,
3336 unsigned long freelist_count)
3337 {
3338 unsigned int idx;
3339
3340 /*
3341 * If the target page allocation failed, the number of objects on the
3342 * page might be smaller than the usual size defined by the cache.
3343 */
3344 do {
3345 idx = s->random_seq[*pos];
3346 *pos += 1;
3347 if (*pos >= freelist_count)
3348 *pos = 0;
3349 } while (unlikely(idx >= page_limit));
3350
3351 return (char *)start + idx;
3352 }
3353
3354 static DEFINE_PER_CPU(struct rnd_state, slab_rnd_state);
3355
3356 /* Shuffle the single linked freelist based on a random pre-computed sequence */
3357 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
3358 bool allow_spin)
3359 {
3360 void *start;
3361 void *cur;
3362 void *next;
3363 unsigned long idx, pos, page_limit, freelist_count;
3364
3365 if (slab->objects < 2 || !s->random_seq)
3366 return false;
3367
3368 freelist_count = oo_objects(s->oo);
3369 if (allow_spin) {
3370 pos = get_random_u32_below(freelist_count);
3371 } else {
3372 struct rnd_state *state;
3373
3374 /*
3375 * An interrupt or NMI handler might interrupt and change
3376 * the state in the middle, but that's safe.
3377 */
3378 state = &get_cpu_var(slab_rnd_state);
3379 pos = prandom_u32_state(state) % freelist_count;
3380 put_cpu_var(slab_rnd_state);
3381 }
3382
3383 page_limit = slab->objects * s->size;
3384 start = fixup_red_left(s, slab_address(slab));
3385
3386 /* First entry is used as the base of the freelist */
3387 cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
3388 cur = setup_object(s, cur);
3389 slab->freelist = cur;
3390
3391 for (idx = 1; idx < slab->objects; idx++) {
3392 next = next_freelist_entry(s, &pos, start, page_limit,
3393 freelist_count);
3394 next = setup_object(s, next);
3395 set_freepointer(s, cur, next);
3396 cur = next;
3397 }
3398 set_freepointer(s, cur, NULL);
3399
3400 return true;
3401 }
3402 #else
3403 static inline int init_cache_random_seq(struct kmem_cache *s)
3404 {
3405 return 0;
3406 }
3407 static inline void init_freelist_randomization(void) { }
3408 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
3409 bool allow_spin)
3410 {
3411 return false;
3412 }
3413 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
3414
3415 static __always_inline void account_slab(struct slab *slab, int order,
3416 struct kmem_cache *s, gfp_t gfp)
3417 {
3418 if (memcg_kmem_online() &&
3419 (s->flags & SLAB_ACCOUNT) &&
3420 !slab_obj_exts(slab))
3421 alloc_slab_obj_exts(slab, s, gfp, true);
3422
3423 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
3424 PAGE_SIZE << order);
3425 }
3426
3427 static __always_inline void unaccount_slab(struct slab *slab, int order,
3428 struct kmem_cache *s, bool allow_spin)
3429 {
3430 /*
3431 * The slab object extensions should now be freed regardless of
3432 * whether mem_alloc_profiling_enabled() or not because profiling
3433 * might have been disabled after slab->obj_exts got allocated.
3434 */
3435 free_slab_obj_exts(slab, allow_spin);
3436
3437 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
3438 -(PAGE_SIZE << order));
3439 }
3440
3441 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
3442 {
3443 bool allow_spin = gfpflags_allow_spinning(flags);
3444 struct slab *slab;
3445 struct kmem_cache_order_objects oo = s->oo;
3446 gfp_t alloc_gfp;
3447 void *start, *p, *next;
3448 int idx;
3449 bool shuffle;
3450
3451 flags &= gfp_allowed_mask;
3452
3453 flags |= s->allocflags;
3454
3455 /*
3456 * Let the initial higher-order allocation fail under memory pressure
3457 * so we fall back to the minimum order allocation.
3458 */
3459 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
3460 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
3461 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
3462
3463 /*
3464 * __GFP_RECLAIM could be cleared on the first allocation attempt,
3465 * so pass allow_spin flag directly.
3466 */
3467 slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
3468 if (unlikely(!slab)) {
3469 oo = s->min;
3470 alloc_gfp = flags;
3471 /*
3472 * Allocation may have failed due to fragmentation.
3473 * Try a lower order alloc if possible.
3474 */
3475 slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
3476 if (unlikely(!slab))
3477 return NULL;
3478 stat(s, ORDER_FALLBACK);
3479 }
3480
3481 slab->objects = oo_objects(oo);
3482 slab->inuse = 0;
3483 slab->frozen = 0;
3484
3485 slab->slab_cache = s;
3486
3487 kasan_poison_slab(slab);
3488
3489 start = slab_address(slab);
3490
3491 setup_slab_debug(s, slab, start);
3492 init_slab_obj_exts(slab);
3493 /*
3494 * Poison the slab before initializing the slabobj_ext array
3495 * to prevent the array from being overwritten.
3496 */
3497 alloc_slab_obj_exts_early(s, slab);
3498 account_slab(slab, oo_order(oo), s, flags);
3499
3500 shuffle = shuffle_freelist(s, slab, allow_spin);
3501
3502 if (!shuffle) {
3503 start = fixup_red_left(s, start);
3504 start = setup_object(s, start);
3505 slab->freelist = start;
3506 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
3507 next = p + s->size;
3508 next = setup_object(s, next);
3509 set_freepointer(s, p, next);
3510 p = next;
3511 }
3512 set_freepointer(s, p, NULL);
3513 }
3514
3515 return slab;
3516 }
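/*
 * Illustrative sketch only (not used by the allocator): with freelist
 * randomization disabled, the loop above links the objects in address order.
 * For a hypothetical slab with slab->objects == 4 and s->size == 64 the
 * resulting freelist is
 *
 *	slab->freelist -> start -> start + 64 -> start + 128 -> start + 192 -> NULL
 *
 * where start is the slab address after fixup_red_left() and each arrow is a
 * free pointer written by set_freepointer().
 */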
3517
3518 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
3519 {
3520 if (unlikely(flags & GFP_SLAB_BUG_MASK))
3521 flags = kmalloc_fix_flags(flags);
3522
3523 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
3524
3525 return allocate_slab(s,
3526 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
3527 }
3528
3529 static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin)
3530 {
3531 struct page *page = slab_page(slab);
3532 int order = compound_order(page);
3533 int pages = 1 << order;
3534
3535 __slab_clear_pfmemalloc(slab);
3536 page->mapping = NULL;
3537 __ClearPageSlab(page);
3538 mm_account_reclaimed_pages(pages);
3539 unaccount_slab(slab, order, s, allow_spin);
3540 if (allow_spin)
3541 free_frozen_pages(page, order);
3542 else
3543 free_frozen_pages_nolock(page, order);
3544 }
3545
3546 static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
3547 {
3548 /*
3549 * Since it was just allocated, we can skip the actions in
3550 * discard_slab() and free_slab().
3551 */
3552 __free_slab(s, slab, false);
3553 }
3554
3555 static void rcu_free_slab(struct rcu_head *h)
3556 {
3557 struct slab *slab = container_of(h, struct slab, rcu_head);
3558
3559 __free_slab(slab->slab_cache, slab, true);
3560 }
3561
3562 static void free_slab(struct kmem_cache *s, struct slab *slab)
3563 {
3564 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
3565 void *p;
3566
3567 slab_pad_check(s, slab);
3568 for_each_object(p, s, slab_address(slab), slab->objects)
3569 check_object(s, slab, p, SLUB_RED_INACTIVE);
3570 }
3571
3572 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
3573 call_rcu(&slab->rcu_head, rcu_free_slab);
3574 else
3575 __free_slab(s, slab, true);
3576 }
3577
3578 static void discard_slab(struct kmem_cache *s, struct slab *slab)
3579 {
3580 dec_slabs_node(s, slab_nid(slab), slab->objects);
3581 free_slab(s, slab);
3582 }
3583
3584 static inline bool slab_test_node_partial(const struct slab *slab)
3585 {
3586 return test_bit(SL_partial, &slab->flags.f);
3587 }
3588
3589 static inline void slab_set_node_partial(struct slab *slab)
3590 {
3591 set_bit(SL_partial, &slab->flags.f);
3592 }
3593
3594 static inline void slab_clear_node_partial(struct slab *slab)
3595 {
3596 clear_bit(SL_partial, &slab->flags.f);
3597 }
3598
3599 /*
3600 * Management of partially allocated slabs.
3601 */
3602 static inline void
3603 __add_partial(struct kmem_cache_node *n, struct slab *slab, enum add_mode mode)
3604 {
3605 n->nr_partial++;
3606 if (mode == ADD_TO_TAIL)
3607 list_add_tail(&slab->slab_list, &n->partial);
3608 else
3609 list_add(&slab->slab_list, &n->partial);
3610 slab_set_node_partial(slab);
3611 }
3612
3613 static inline void add_partial(struct kmem_cache_node *n,
3614 struct slab *slab, enum add_mode mode)
3615 {
3616 lockdep_assert_held(&n->list_lock);
3617 __add_partial(n, slab, mode);
3618 }
3619
3620 static inline void remove_partial(struct kmem_cache_node *n,
3621 struct slab *slab)
3622 {
3623 lockdep_assert_held(&n->list_lock);
3624 list_del(&slab->slab_list);
3625 slab_clear_node_partial(slab);
3626 n->nr_partial--;
3627 }
3628
3629 /*
3630 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
3631 * slab from the n->partial list. Remove only a single object from the slab, do
3632 * the alloc_debug_processing() checks and leave the slab on the list, or move
3633 * it to full list if it was the last free object.
3634 */
3635 static void *alloc_single_from_partial(struct kmem_cache *s,
3636 struct kmem_cache_node *n, struct slab *slab, int orig_size)
3637 {
3638 void *object;
3639
3640 lockdep_assert_held(&n->list_lock);
3641
3642 #ifdef CONFIG_SLUB_DEBUG
3643 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3644 if (!validate_slab_ptr(slab)) {
3645 slab_err(s, slab, "Not a valid slab page");
3646 return NULL;
3647 }
3648 }
3649 #endif
3650
3651 object = slab->freelist;
3652 slab->freelist = get_freepointer(s, object);
3653 slab->inuse++;
3654
3655 if (!alloc_debug_processing(s, slab, object, orig_size)) {
3656 remove_partial(n, slab);
3657 return NULL;
3658 }
3659
3660 if (slab->inuse == slab->objects) {
3661 remove_partial(n, slab);
3662 add_full(s, n, slab);
3663 }
3664
3665 return object;
3666 }
3667
3668 /*
3669 * Called only for kmem_cache_debug() caches to allocate from a freshly
3670 * allocated slab. Allocate a single object instead of whole freelist
3671 * and put the slab to the partial (or full) list.
3672 */
3673 static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
3674 int orig_size, gfp_t gfpflags)
3675 {
3676 bool allow_spin = gfpflags_allow_spinning(gfpflags);
3677 int nid = slab_nid(slab);
3678 struct kmem_cache_node *n = get_node(s, nid);
3679 unsigned long flags;
3680 void *object;
3681
3682 if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
3683 /* Unlucky, discard newly allocated slab. */
3684 free_new_slab_nolock(s, slab);
3685 return NULL;
3686 }
3687
3688 object = slab->freelist;
3689 slab->freelist = get_freepointer(s, object);
3690 slab->inuse = 1;
3691
3692 if (!alloc_debug_processing(s, slab, object, orig_size)) {
3693 /*
3694 * It's not really expected that this would fail on a
3695 * freshly allocated slab, but a concurrent memory
3696 * corruption in theory could cause that.
3697 * Leak memory of allocated slab.
3698 */
3699 if (!allow_spin)
3700 spin_unlock_irqrestore(&n->list_lock, flags);
3701 return NULL;
3702 }
3703
3704 if (allow_spin)
3705 spin_lock_irqsave(&n->list_lock, flags);
3706
3707 if (slab->inuse == slab->objects)
3708 add_full(s, n, slab);
3709 else
3710 add_partial(n, slab, ADD_TO_HEAD);
3711
3712 inc_slabs_node(s, nid, slab->objects);
3713 spin_unlock_irqrestore(&n->list_lock, flags);
3714
3715 return object;
3716 }
3717
3718 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
3719
3720 static bool get_partial_node_bulk(struct kmem_cache *s,
3721 struct kmem_cache_node *n,
3722 struct partial_bulk_context *pc,
3723 bool allow_spin)
3724 {
3725 struct slab *slab, *slab2;
3726 unsigned int total_free = 0;
3727 unsigned long flags;
3728
3729 /* Racy check to avoid taking the lock unnecessarily. */
3730 if (!n || data_race(!n->nr_partial))
3731 return false;
3732
3733 INIT_LIST_HEAD(&pc->slabs);
3734
3735 if (allow_spin)
3736 spin_lock_irqsave(&n->list_lock, flags);
3737 else if (!spin_trylock_irqsave(&n->list_lock, flags))
3738 return false;
3739
3740 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
3741 struct freelist_counters flc;
3742 unsigned int slab_free;
3743
3744 if (!pfmemalloc_match(slab, pc->flags))
3745 continue;
3746
3747 /*
3748 * determine the number of free objects in the slab racily
3749 *
3750 * slab_free is a lower bound due to possible subsequent
3751 * concurrent freeing, so the caller may get more objects than
3752 * requested and must handle that
3753 */
3754 flc.counters = data_race(READ_ONCE(slab->counters));
3755 slab_free = flc.objects - flc.inuse;
3756
3757 /* we already have the minimum and this slab would get us over the maximum */
3758 if (total_free >= pc->min_objects
3759 && total_free + slab_free > pc->max_objects)
3760 break;
3761
3762 remove_partial(n, slab);
3763
3764 list_add(&slab->slab_list, &pc->slabs);
3765
3766 total_free += slab_free;
3767 if (total_free >= pc->max_objects)
3768 break;
3769 }
3770
3771 spin_unlock_irqrestore(&n->list_lock, flags);
3772 return total_free > 0;
3773 }
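/*
 * Worked example of the min/max logic above (illustrative numbers only): with
 * pc->min_objects == 8 and pc->max_objects == 32, a first slab with 10 free
 * objects is always taken because the minimum is not met yet. If the next
 * slab has 30 free objects, taking it would exceed the maximum while the
 * minimum is already satisfied, so the loop breaks with ~10 objects. If it
 * instead has 20 free objects, it is taken (total ~30) and scanning continues
 * until the maximum would be exceeded. The per-slab counts are racy lower
 * bounds, so the caller can still end up with more than max_objects.
 */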
3774
3775 /*
3776 * Try to allocate object from a partial slab on a specific node.
3777 */
3778 static void *get_from_partial_node(struct kmem_cache *s,
3779 struct kmem_cache_node *n,
3780 struct partial_context *pc)
3781 {
3782 struct slab *slab, *slab2;
3783 unsigned long flags;
3784 void *object = NULL;
3785
3786 /*
3787 * Racy check. If we mistakenly see no partial slabs then we
3788 * just allocate an empty slab. If we mistakenly try to get a
3789 * partial slab and there is none available then get_from_partial()
3790 * will return NULL.
3791 */
3792 if (!n || !n->nr_partial)
3793 return NULL;
3794
3795 if (gfpflags_allow_spinning(pc->flags))
3796 spin_lock_irqsave(&n->list_lock, flags);
3797 else if (!spin_trylock_irqsave(&n->list_lock, flags))
3798 return NULL;
3799 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
3800
3801 struct freelist_counters old, new;
3802
3803 if (!pfmemalloc_match(slab, pc->flags))
3804 continue;
3805
3806 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
3807 object = alloc_single_from_partial(s, n, slab,
3808 pc->orig_size);
3809 if (object)
3810 break;
3811 continue;
3812 }
3813
3814 /*
3815 * Get a single object from the slab. This can race with
3816 * __slab_free(), but __slab_free() has to take the list_lock if
3817 * it's about to make the slab fully free.
3818 */
3819 do {
3820 old.freelist = slab->freelist;
3821 old.counters = slab->counters;
3822
3823 new.freelist = get_freepointer(s, old.freelist);
3824 new.counters = old.counters;
3825 new.inuse++;
3826
3827 } while (!__slab_update_freelist(s, slab, &old, &new, "get_from_partial_node"));
3828
3829 object = old.freelist;
3830 if (!new.freelist)
3831 remove_partial(n, slab);
3832
3833 break;
3834 }
3835 spin_unlock_irqrestore(&n->list_lock, flags);
3836 return object;
3837 }
3838
3839 /*
3840 * Get an object from somewhere. Search in increasing NUMA distances.
3841 */
3842 static void *get_from_any_partial(struct kmem_cache *s, struct partial_context *pc)
3843 {
3844 #ifdef CONFIG_NUMA
3845 struct zonelist *zonelist;
3846 struct zoneref *z;
3847 struct zone *zone;
3848 enum zone_type highest_zoneidx = gfp_zone(pc->flags);
3849 unsigned int cpuset_mems_cookie;
3850 bool allow_spin = gfpflags_allow_spinning(pc->flags);
3851
3852 /*
3853 * The defrag ratio allows a configuration of the tradeoffs between
3854 * inter node defragmentation and node local allocations. A lower
3855 * defrag_ratio increases the tendency to do local allocations
3856 * instead of attempting to obtain partial slabs from other nodes.
3857 *
3858 * If the defrag_ratio is set to 0 then kmalloc() always
3859 * returns node local objects. If the ratio is higher then kmalloc()
3860 * may return off node objects because partial slabs are obtained
3861 * from other nodes and filled up.
3862 *
3863 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
3864 * (which makes defrag_ratio = 1000) then every (well almost)
3865 * allocation will first attempt to defrag slab caches on other nodes.
3866 * This means scanning over all nodes to look for partial slabs which
3867 * may be expensive if we do it every time we are trying to find a slab
3868 * with available objects.
3869 */
3870 if (!s->remote_node_defrag_ratio ||
3871 get_cycles() % 1024 > s->remote_node_defrag_ratio)
3872 return NULL;
3873
3874 do {
3875 /*
3876 * read_mems_allowed_begin() accesses current->mems_allowed_seq,
3877 * a seqcount_spinlock_t that is not NMI-safe. Do not access
3878 * current->mems_allowed_seq and avoid retry when GFP flags
3879 * indicate spinning is not allowed.
3880 */
3881 if (allow_spin)
3882 cpuset_mems_cookie = read_mems_allowed_begin();
3883
3884 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
3885 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
3886 struct kmem_cache_node *n;
3887
3888 n = get_node(s, zone_to_nid(zone));
3889
3890 if (n && cpuset_zone_allowed(zone, pc->flags) &&
3891 n->nr_partial > s->min_partial) {
3892
3893 void *object = get_from_partial_node(s, n, pc);
3894
3895 if (object) {
3896 /*
3897 * Don't check read_mems_allowed_retry()
3898 * here - if mems_allowed was updated in
3899 * parallel, that was a harmless race
3900 * between allocation and the cpuset
3901 * update
3902 */
3903 return object;
3904 }
3905 }
3906 }
3907 } while (allow_spin && read_mems_allowed_retry(cpuset_mems_cookie));
3908 #endif /* CONFIG_NUMA */
3909 return NULL;
3910 }
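/*
 * Worked example for the remote_node_defrag_ratio check above (illustrative
 * only, treating get_cycles() % 1024 as roughly uniform): following the
 * 100 -> 1000 scaling mentioned above, a sysfs value of 20 is stored as 200,
 * so the cross-node partial list search is attempted only when
 * get_cycles() % 1024 does not exceed 200, i.e. in roughly 200/1024 (~20%)
 * of the allocations that reach this point; the rest return NULL here and
 * fall back to allocating a new slab locally.
 */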
3911
3912 /*
3913 * Get an object from a partial slab
3914 */
3915 static void *get_from_partial(struct kmem_cache *s, int node,
3916 struct partial_context *pc)
3917 {
3918 int searchnode = node;
3919 void *object;
3920
3921 if (node == NUMA_NO_NODE)
3922 searchnode = numa_mem_id();
3923
3924 object = get_from_partial_node(s, get_node(s, searchnode), pc);
3925 if (object || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
3926 return object;
3927
3928 return get_from_any_partial(s, pc);
3929 }
3930
3931 static bool has_pcs_used(int cpu, struct kmem_cache *s)
3932 {
3933 struct slub_percpu_sheaves *pcs;
3934
3935 if (!cache_has_sheaves(s))
3936 return false;
3937
3938 pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3939
3940 return (pcs->spare || pcs->rcu_free || pcs->main->size);
3941 }
3942
3943 /*
3944 * Flush percpu sheaves
3945 *
3946 * Called from CPU work handler with migration disabled.
3947 */
3948 static void flush_cpu_sheaves(struct work_struct *w)
3949 {
3950 struct kmem_cache *s;
3951 struct slub_flush_work *sfw;
3952
3953 sfw = container_of(w, struct slub_flush_work, work);
3954
3955 s = sfw->s;
3956
3957 if (cache_has_sheaves(s))
3958 pcs_flush_all(s);
3959 }
3960
3961 static void flush_all_cpus_locked(struct kmem_cache *s)
3962 {
3963 struct slub_flush_work *sfw;
3964 unsigned int cpu;
3965
3966 lockdep_assert_cpus_held();
3967 mutex_lock(&flush_lock);
3968
3969 for_each_online_cpu(cpu) {
3970 sfw = &per_cpu(slub_flush, cpu);
3971 if (!has_pcs_used(cpu, s)) {
3972 sfw->skip = true;
3973 continue;
3974 }
3975 INIT_WORK(&sfw->work, flush_cpu_sheaves);
3976 sfw->skip = false;
3977 sfw->s = s;
3978 queue_work_on(cpu, flushwq, &sfw->work);
3979 }
3980
3981 for_each_online_cpu(cpu) {
3982 sfw = &per_cpu(slub_flush, cpu);
3983 if (sfw->skip)
3984 continue;
3985 flush_work(&sfw->work);
3986 }
3987
3988 mutex_unlock(&flush_lock);
3989 }
3990
3991 static void flush_all(struct kmem_cache *s)
3992 {
3993 cpus_read_lock();
3994 flush_all_cpus_locked(s);
3995 cpus_read_unlock();
3996 }
3997
3998 static void flush_rcu_sheaf(struct work_struct *w)
3999 {
4000 struct slub_percpu_sheaves *pcs;
4001 struct slab_sheaf *rcu_free;
4002 struct slub_flush_work *sfw;
4003 struct kmem_cache *s;
4004
4005 sfw = container_of(w, struct slub_flush_work, work);
4006 s = sfw->s;
4007
4008 local_lock(&s->cpu_sheaves->lock);
4009 pcs = this_cpu_ptr(s->cpu_sheaves);
4010
4011 rcu_free = pcs->rcu_free;
4012 pcs->rcu_free = NULL;
4013
4014 local_unlock(&s->cpu_sheaves->lock);
4015
4016 if (rcu_free)
4017 call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
4018 }
4019
4020
4021 /* needed for kvfree_rcu_barrier() */
4022 void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
4023 {
4024 struct slub_flush_work *sfw;
4025 unsigned int cpu;
4026
4027 mutex_lock(&flush_lock);
4028
4029 for_each_online_cpu(cpu) {
4030 sfw = &per_cpu(slub_flush, cpu);
4031
4032 /*
4033 * We don't check whether an rcu_free sheaf exists - a racing
4034 * __kfree_rcu_sheaf() might have just removed it.
4035 * By executing flush_rcu_sheaf() on the cpu we make sure that
4036 * any such __kfree_rcu_sheaf() has finished its call_rcu().
4037 */
4038
4039 INIT_WORK(&sfw->work, flush_rcu_sheaf);
4040 sfw->s = s;
4041 queue_work_on(cpu, flushwq, &sfw->work);
4042 }
4043
4044 for_each_online_cpu(cpu) {
4045 sfw = &per_cpu(slub_flush, cpu);
4046 flush_work(&sfw->work);
4047 }
4048
4049 mutex_unlock(&flush_lock);
4050 }
4051
4052 void flush_all_rcu_sheaves(void)
4053 {
4054 struct kmem_cache *s;
4055
4056 cpus_read_lock();
4057 mutex_lock(&slab_mutex);
4058
4059 list_for_each_entry(s, &slab_caches, list) {
4060 if (!cache_has_sheaves(s))
4061 continue;
4062 flush_rcu_sheaves_on_cache(s);
4063 }
4064
4065 mutex_unlock(&slab_mutex);
4066 cpus_read_unlock();
4067
4068 rcu_barrier();
4069 }
4070
4071 static int slub_cpu_setup(unsigned int cpu)
4072 {
4073 int nid = cpu_to_node(cpu);
4074 struct kmem_cache *s;
4075 int ret = 0;
4076
4077 /*
4078 * we never clear a nid so it's safe to do a quick check before taking
4079 * the mutex, and then recheck to handle parallel cpu hotplug safely
4080 */
4081 if (node_isset(nid, slab_barn_nodes))
4082 return 0;
4083
4084 mutex_lock(&slab_mutex);
4085
4086 if (node_isset(nid, slab_barn_nodes))
4087 goto out;
4088
4089 list_for_each_entry(s, &slab_caches, list) {
4090 struct node_barn *barn;
4091
4092 /*
4093 * barn might already exist if a previous callback failed midway
4094 */
4095 if (!cache_has_sheaves(s) || get_barn_node(s, nid))
4096 continue;
4097
4098 barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, nid);
4099
4100 if (!barn) {
4101 ret = -ENOMEM;
4102 goto out;
4103 }
4104
4105 barn_init(barn);
4106 s->per_node[nid].barn = barn;
4107 }
4108 node_set(nid, slab_barn_nodes);
4109
4110 out:
4111 mutex_unlock(&slab_mutex);
4112
4113 return ret;
4114 }
4115
4116 /*
4117 * Use the cpu hotplug notifier to ensure that the percpu sheaves are
4118 * flushed when necessary.
4119 */
4120 static int slub_cpu_dead(unsigned int cpu)
4121 {
4122 struct kmem_cache *s;
4123
4124 mutex_lock(&slab_mutex);
4125 list_for_each_entry(s, &slab_caches, list) {
4126 if (cache_has_sheaves(s))
4127 __pcs_flush_all_cpu(s, cpu);
4128 }
4129 mutex_unlock(&slab_mutex);
4130 return 0;
4131 }
4132
4133 #ifdef CONFIG_SLUB_DEBUG
4134 static int count_free(struct slab *slab)
4135 {
4136 return slab->objects - slab->inuse;
4137 }
4138
4139 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
4140 {
4141 return atomic_long_read(&n->total_objects);
4142 }
4143
4144 /* Supports checking bulk free of a constructed freelist */
4145 static inline bool free_debug_processing(struct kmem_cache *s,
4146 struct slab *slab, void *head, void *tail, int *bulk_cnt,
4147 unsigned long addr, depot_stack_handle_t handle)
4148 {
4149 bool checks_ok = false;
4150 void *object = head;
4151 int cnt = 0;
4152
4153 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
4154 if (!check_slab(s, slab))
4155 goto out;
4156 }
4157
4158 if (slab->inuse < *bulk_cnt) {
4159 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
4160 slab->inuse, *bulk_cnt);
4161 goto out;
4162 }
4163
4164 next_object:
4165
4166 if (++cnt > *bulk_cnt)
4167 goto out_cnt;
4168
4169 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
4170 if (!free_consistency_checks(s, slab, object, addr))
4171 goto out;
4172 }
4173
4174 if (s->flags & SLAB_STORE_USER)
4175 set_track_update(s, object, TRACK_FREE, addr, handle);
4176 trace(s, slab, object, 0);
4177 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
4178 init_object(s, object, SLUB_RED_INACTIVE);
4179
4180 /* Reached end of constructed freelist yet? */
4181 if (object != tail) {
4182 object = get_freepointer(s, object);
4183 goto next_object;
4184 }
4185 checks_ok = true;
4186
4187 out_cnt:
4188 if (cnt != *bulk_cnt) {
4189 slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
4190 *bulk_cnt, cnt);
4191 *bulk_cnt = cnt;
4192 }
4193
4194 out:
4195
4196 if (!checks_ok)
4197 slab_fix(s, "Object at 0x%p not freed", object);
4198
4199 return checks_ok;
4200 }
4201 #endif /* CONFIG_SLUB_DEBUG */
4202
4203 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
4204 static unsigned long count_partial(struct kmem_cache_node *n,
4205 int (*get_count)(struct slab *))
4206 {
4207 unsigned long flags;
4208 unsigned long x = 0;
4209 struct slab *slab;
4210
4211 spin_lock_irqsave(&n->list_lock, flags);
4212 list_for_each_entry(slab, &n->partial, slab_list)
4213 x += get_count(slab);
4214 spin_unlock_irqrestore(&n->list_lock, flags);
4215 return x;
4216 }
4217 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
4218
4219 #ifdef CONFIG_SLUB_DEBUG
4220 #define MAX_PARTIAL_TO_SCAN 10000
4221
4222 static unsigned long count_partial_free_approx(struct kmem_cache_node *n)
4223 {
4224 unsigned long flags;
4225 unsigned long x = 0;
4226 struct slab *slab;
4227
4228 spin_lock_irqsave(&n->list_lock, flags);
4229 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) {
4230 list_for_each_entry(slab, &n->partial, slab_list)
4231 x += slab->objects - slab->inuse;
4232 } else {
4233 /*
4234 * For a long list, approximate the total count of objects in
4235 * it to meet the limit on the number of slabs to scan.
4236 * Scan from both the list's head and tail for better accuracy.
4237 */
4238 unsigned long scanned = 0;
4239
4240 list_for_each_entry(slab, &n->partial, slab_list) {
4241 x += slab->objects - slab->inuse;
4242 if (++scanned == MAX_PARTIAL_TO_SCAN / 2)
4243 break;
4244 }
4245 list_for_each_entry_reverse(slab, &n->partial, slab_list) {
4246 x += slab->objects - slab->inuse;
4247 if (++scanned == MAX_PARTIAL_TO_SCAN)
4248 break;
4249 }
4250 x = mult_frac(x, n->nr_partial, scanned);
4251 x = min(x, node_nr_objs(n));
4252 }
4253 spin_unlock_irqrestore(&n->list_lock, flags);
4254 return x;
4255 }
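/*
 * Illustrative example of the extrapolation above: if n->nr_partial is 50000
 * and the head/tail scan covered scanned == 10000 slabs holding x free
 * objects in total, the estimate becomes mult_frac(x, 50000, 10000) == 5 * x,
 * clamped to node_nr_objs(n) so the approximation never exceeds the total
 * object count of the node.
 */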
4256
4257 static noinline void
4258 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
4259 {
4260 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
4261 DEFAULT_RATELIMIT_BURST);
4262 int cpu = raw_smp_processor_id();
4263 int node;
4264 struct kmem_cache_node *n;
4265
4266 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
4267 return;
4268
4269 pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n",
4270 cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags);
4271 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
4272 s->name, s->object_size, s->size, oo_order(s->oo),
4273 oo_order(s->min));
4274
4275 if (oo_order(s->min) > get_order(s->object_size))
4276 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n",
4277 s->name);
4278
4279 for_each_kmem_cache_node(s, node, n) {
4280 unsigned long nr_slabs;
4281 unsigned long nr_objs;
4282 unsigned long nr_free;
4283
4284 nr_free = count_partial_free_approx(n);
4285 nr_slabs = node_nr_slabs(n);
4286 nr_objs = node_nr_objs(n);
4287
4288 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
4289 node, nr_slabs, nr_objs, nr_free);
4290 }
4291 }
4292 #else /* CONFIG_SLUB_DEBUG */
4293 static inline void
4294 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
4295 #endif
4296
4297 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
4298 {
4299 if (unlikely(slab_test_pfmemalloc(slab)))
4300 return gfp_pfmemalloc_allowed(gfpflags);
4301
4302 return true;
4303 }
4304
4305 /*
4306 * Get the slab's freelist and do not freeze it.
4307 *
4308 * Assumes the slab is isolated from node partial list and not frozen.
4309 *
4310 * Assumes this is performed only for caches without debugging so we
4311 * don't need to worry about adding the slab to the full list.
4312 */
4313 static inline void *get_freelist_nofreeze(struct kmem_cache *s, struct slab *slab)
4314 {
4315 struct freelist_counters old, new;
4316
4317 do {
4318 old.freelist = slab->freelist;
4319 old.counters = slab->counters;
4320
4321 new.freelist = NULL;
4322 new.counters = old.counters;
4323 VM_WARN_ON_ONCE(new.frozen);
4324
4325 new.inuse = old.objects;
4326
4327 } while (!slab_update_freelist(s, slab, &old, &new, "get_freelist_nofreeze"));
4328
4329 return old.freelist;
4330 }
4331
4332 /*
4333 * If the object has been wiped upon free, make sure it's fully initialized by
4334 * zeroing out freelist pointer.
4335 *
4336 * Note that we also wipe custom freelist pointers.
4337 */
4338 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
4339 void *obj)
4340 {
4341 if (unlikely(slab_want_init_on_free(s)) && obj &&
4342 !freeptr_outside_object(s))
4343 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
4344 0, sizeof(void *));
4345 }
4346
4347 static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
4348 void **p, unsigned int count, bool allow_spin)
4349 {
4350 unsigned int allocated = 0;
4351 struct kmem_cache_node *n;
4352 bool needs_add_partial;
4353 unsigned long flags;
4354 void *object;
4355
4356 /*
4357 * Are we going to put the slab on the partial list?
4358 * Note slab->inuse is 0 on a new slab.
4359 */
4360 needs_add_partial = (slab->objects > count);
4361
4362 if (!allow_spin && needs_add_partial) {
4363
4364 n = get_node(s, slab_nid(slab));
4365
4366 if (!spin_trylock_irqsave(&n->list_lock, flags)) {
4367 /* Unlucky, discard newly allocated slab */
4368 free_new_slab_nolock(s, slab);
4369 return 0;
4370 }
4371 }
4372
4373 object = slab->freelist;
4374 while (object && allocated < count) {
4375 p[allocated] = object;
4376 object = get_freepointer(s, object);
4377 maybe_wipe_obj_freeptr(s, p[allocated]);
4378
4379 slab->inuse++;
4380 allocated++;
4381 }
4382 slab->freelist = object;
4383
4384 if (needs_add_partial) {
4385
4386 if (allow_spin) {
4387 n = get_node(s, slab_nid(slab));
4388 spin_lock_irqsave(&n->list_lock, flags);
4389 }
4390 add_partial(n, slab, ADD_TO_HEAD);
4391 spin_unlock_irqrestore(&n->list_lock, flags);
4392 }
4393
4394 inc_slabs_node(s, slab_nid(slab), slab->objects);
4395 return allocated;
4396 }
4397
4398 /*
4399 * Slow path. We failed to allocate via percpu sheaves, or they are not
4400 * available due to bootstrap, debugging being enabled, or SLUB_TINY.
4401 *
4402 * We try to allocate from partial slab lists and fall back to allocating a new
4403 * slab.
4404 */
4405 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
4406 unsigned long addr, unsigned int orig_size)
4407 {
4408 bool allow_spin = gfpflags_allow_spinning(gfpflags);
4409 void *object;
4410 struct slab *slab;
4411 struct partial_context pc;
4412 bool try_thisnode = true;
4413
4414 stat(s, ALLOC_SLOWPATH);
4415
4416 new_objects:
4417
4418 pc.flags = gfpflags;
4419 /*
4420 * When a preferred node is indicated but no __GFP_THISNODE
4421 *
4422 * 1) try to get a partial slab from target node only by having
4423 * __GFP_THISNODE in pc.flags for get_from_partial()
4424 * 2) if 1) failed, try to allocate a new slab from target node with
4425 * GFP_NOWAIT | __GFP_THISNODE opportunistically
4426 * 3) if 2) failed, retry with the original gfpflags, which will allow
4427 * get_from_partial() to try partial lists of other nodes before
4428 * potentially allocating a new page from other nodes
4429 */
4430 if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
4431 && try_thisnode)) {
4432 if (unlikely(!allow_spin))
4433 /* Do not upgrade gfp to NOWAIT from more restrictive mode */
4434 pc.flags = gfpflags | __GFP_THISNODE;
4435 else
4436 pc.flags = GFP_NOWAIT | __GFP_THISNODE;
4437 }
4438
4439 pc.orig_size = orig_size;
4440 object = get_from_partial(s, node, &pc);
4441 if (object)
4442 goto success;
4443
4444 slab = new_slab(s, pc.flags, node);
4445
4446 if (unlikely(!slab)) {
4447 if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
4448 && try_thisnode) {
4449 try_thisnode = false;
4450 goto new_objects;
4451 }
4452 slab_out_of_memory(s, gfpflags, node);
4453 return NULL;
4454 }
4455
4456 stat(s, ALLOC_SLAB);
4457
4458 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4459 object = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
4460
4461 if (likely(object))
4462 goto success;
4463 } else {
4464 alloc_from_new_slab(s, slab, &object, 1, allow_spin);
4465
4466 /* we don't need to check SLAB_STORE_USER here */
4467 if (likely(object))
4468 return object;
4469 }
4470
4471 if (allow_spin)
4472 goto new_objects;
4473
4474 /* This could cause an endless loop. Fail instead. */
4475 return NULL;
4476
4477 success:
4478 if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
4479 set_track(s, object, TRACK_ALLOC, addr, gfpflags);
4480
4481 return object;
4482 }
4483
4484 static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
4485 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4486 {
4487 void *object;
4488
4489 #ifdef CONFIG_NUMA
4490 if (static_branch_unlikely(&strict_numa) &&
4491 node == NUMA_NO_NODE) {
4492
4493 struct mempolicy *mpol = current->mempolicy;
4494
4495 if (mpol) {
4496 /*
4497 * Special BIND rule support. If the local node
4498 * is in permitted set then do not redirect
4499 * to a particular node.
4500 * Otherwise we apply the memory policy to get
4501 * the node we need to allocate on.
4502 */
4503 if (mpol->mode != MPOL_BIND ||
4504 !node_isset(numa_mem_id(), mpol->nodes))
4505 node = mempolicy_slab_node();
4506 }
4507 }
4508 #endif
4509
4510 object = ___slab_alloc(s, gfpflags, node, addr, orig_size);
4511
4512 return object;
4513 }
4514
4515 static __fastpath_inline
4516 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
4517 {
4518 flags &= gfp_allowed_mask;
4519
4520 might_alloc(flags);
4521
4522 if (unlikely(should_failslab(s, flags)))
4523 return NULL;
4524
4525 return s;
4526 }
4527
4528 static __fastpath_inline
4529 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
4530 gfp_t flags, size_t size, void **p, bool init,
4531 unsigned int orig_size)
4532 {
4533 unsigned int zero_size = s->object_size;
4534 bool kasan_init = init;
4535 size_t i;
4536 gfp_t init_flags = flags & gfp_allowed_mask;
4537
4538 /*
4539 * For a kmalloc object, the allocated memory size (object_size) is likely
4540 * larger than the requested size (orig_size). If redzone checking is
4541 * enabled for the extra space, don't zero it, as it will be redzoned
4542 * soon. The redzone operation for this extra space could be seen as a
4543 * replacement of current poisoning under certain debug option, and
4544 * won't break other sanity checks.
4545 */
4546 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
4547 (s->flags & SLAB_KMALLOC))
4548 zero_size = orig_size;
4549
4550 /*
4551 * When slab_debug is enabled, avoid memory initialization integrated
4552 * into KASAN and instead zero out the memory via the memset below with
4553 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
4554 * cause false-positive reports. This does not lead to a performance
4555 * penalty on production builds, as slab_debug is not intended to be
4556 * enabled there.
4557 */
4558 if (__slub_debug_enabled())
4559 kasan_init = false;
4560
4561 /*
4562 * As memory initialization might be integrated into KASAN,
4563 * kasan_slab_alloc and initialization memset must be
4564 * kept together to avoid discrepancies in behavior.
4565 *
4566 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
4567 */
4568 for (i = 0; i < size; i++) {
4569 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
4570 if (p[i] && init && (!kasan_init ||
4571 !kasan_has_integrated_init()))
4572 memset(p[i], 0, zero_size);
4573 if (gfpflags_allow_spinning(flags))
4574 kmemleak_alloc_recursive(p[i], s->object_size, 1,
4575 s->flags, init_flags);
4576 kmsan_slab_alloc(s, p[i], init_flags);
4577 alloc_tagging_slab_alloc_hook(s, p[i], flags);
4578 }
4579
4580 return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
4581 }
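/*
 * Illustrative example of the zero_size logic above (the sizes are
 * hypothetical): a kzalloc(13, ...) request served from a 16-byte kmalloc
 * cache has orig_size == 13 and s->object_size == 16. Normally all 16 bytes
 * are zeroed; with SLAB_STORE_USER or SLAB_RED_ZONE debugging enabled on a
 * SLAB_KMALLOC cache only the 13 requested bytes are zeroed, because the
 * remaining bytes are about to be covered by the redzone anyway.
 */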
4582
4583 /*
4584 * Replace the empty main sheaf with a (at least partially) full sheaf.
4585 *
4586 * Must be called with the cpu_sheaves local lock held. If successful, returns
4587 * the pcs pointer with the local lock still held (possibly on a different cpu
4588 * than initially called). If not successful, returns NULL with the local lock
4589 * released.
4590 */
4591 static struct slub_percpu_sheaves *
4592 __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, gfp_t gfp)
4593 {
4594 struct slab_sheaf *empty = NULL;
4595 struct slab_sheaf *full;
4596 struct node_barn *barn;
4597 bool allow_spin;
4598
4599 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
4600
4601 /* Bootstrap or debug cache, back off */
4602 if (unlikely(!cache_has_sheaves(s))) {
4603 local_unlock(&s->cpu_sheaves->lock);
4604 return NULL;
4605 }
4606
4607 if (pcs->spare && pcs->spare->size > 0) {
4608 swap(pcs->main, pcs->spare);
4609 return pcs;
4610 }
4611
4612 barn = get_barn(s);
4613 if (!barn) {
4614 local_unlock(&s->cpu_sheaves->lock);
4615 return NULL;
4616 }
4617
4618 allow_spin = gfpflags_allow_spinning(gfp);
4619
4620 full = barn_replace_empty_sheaf(barn, pcs->main, allow_spin);
4621
4622 if (full) {
4623 stat(s, BARN_GET);
4624 pcs->main = full;
4625 return pcs;
4626 }
4627
4628 stat(s, BARN_GET_FAIL);
4629
4630 if (allow_spin) {
4631 if (pcs->spare) {
4632 empty = pcs->spare;
4633 pcs->spare = NULL;
4634 } else {
4635 empty = barn_get_empty_sheaf(barn, true);
4636 }
4637 }
4638
4639 local_unlock(&s->cpu_sheaves->lock);
4640 pcs = NULL;
4641
4642 if (!allow_spin)
4643 return NULL;
4644
4645 if (!empty) {
4646 empty = alloc_empty_sheaf(s, gfp);
4647 if (!empty)
4648 return NULL;
4649 }
4650
4651 if (refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
4652 /*
4653 * we must be very low on memory so don't bother
4654 * with the barn
4655 */
4656 sheaf_flush_unused(s, empty);
4657 free_empty_sheaf(s, empty);
4658
4659 return NULL;
4660 }
4661
4662 full = empty;
4663 empty = NULL;
4664
4665 if (!local_trylock(&s->cpu_sheaves->lock))
4666 goto barn_put;
4667 pcs = this_cpu_ptr(s->cpu_sheaves);
4668
4669 /*
4670 * If we put any empty or full sheaf to the barn below, it's due to
4671 * racing or being migrated to a different cpu. Breaching the barn's
4672 * sheaf limits should be thus rare enough so just ignore them to
4673 * simplify the recovery.
4674 */
4675
4676 if (pcs->main->size == 0) {
4677 if (!pcs->spare)
4678 pcs->spare = pcs->main;
4679 else
4680 barn_put_empty_sheaf(barn, pcs->main);
4681 pcs->main = full;
4682 return pcs;
4683 }
4684
4685 if (!pcs->spare) {
4686 pcs->spare = full;
4687 return pcs;
4688 }
4689
4690 if (pcs->spare->size == 0) {
4691 barn_put_empty_sheaf(barn, pcs->spare);
4692 pcs->spare = full;
4693 return pcs;
4694 }
4695
4696 barn_put:
4697 barn_put_full_sheaf(barn, full);
4698 stat(s, BARN_PUT);
4699
4700 return pcs;
4701 }
4702
4703 static __fastpath_inline
4704 void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node)
4705 {
4706 struct slub_percpu_sheaves *pcs;
4707 bool node_requested;
4708 void *object;
4709
4710 #ifdef CONFIG_NUMA
4711 if (static_branch_unlikely(&strict_numa) &&
4712 node == NUMA_NO_NODE) {
4713
4714 struct mempolicy *mpol = current->mempolicy;
4715
4716 if (mpol) {
4717 /*
4718 * Special BIND rule support. If the local node
4719 * is in permitted set then do not redirect
4720 * to a particular node.
4721 * Otherwise we apply the memory policy to get
4722 * the node we need to allocate on.
4723 */
4724 if (mpol->mode != MPOL_BIND ||
4725 !node_isset(numa_mem_id(), mpol->nodes))
4726
4727 node = mempolicy_slab_node();
4728 }
4729 }
4730 #endif
4731
4732 node_requested = IS_ENABLED(CONFIG_NUMA) && node != NUMA_NO_NODE;
4733
4734 /*
4735 * We assume the percpu sheaves contain only local objects although it's
4736 * not completely guaranteed, so we verify later.
4737 */
4738 if (unlikely(node_requested && node != numa_mem_id())) {
4739 stat(s, ALLOC_NODE_MISMATCH);
4740 return NULL;
4741 }
4742
4743 if (!local_trylock(&s->cpu_sheaves->lock))
4744 return NULL;
4745
4746 pcs = this_cpu_ptr(s->cpu_sheaves);
4747
4748 if (unlikely(pcs->main->size == 0)) {
4749 pcs = __pcs_replace_empty_main(s, pcs, gfp);
4750 if (unlikely(!pcs))
4751 return NULL;
4752 }
4753
4754 object = pcs->main->objects[pcs->main->size - 1];
4755
4756 if (unlikely(node_requested)) {
4757 /*
4758 * Verify that the object was from the node we want. This could
4759 * be false because of cpu migration during an unlocked part of
4760 * the current allocation or previous freeing process.
4761 */
4762 if (page_to_nid(virt_to_page(object)) != node) {
4763 local_unlock(&s->cpu_sheaves->lock);
4764 stat(s, ALLOC_NODE_MISMATCH);
4765 return NULL;
4766 }
4767 }
4768
4769 pcs->main->size--;
4770
4771 local_unlock(&s->cpu_sheaves->lock);
4772
4773 stat(s, ALLOC_FASTPATH);
4774
4775 return object;
4776 }
4777
4778 static __fastpath_inline
4779 unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, gfp_t gfp, size_t size,
4780 void **p)
4781 {
4782 struct slub_percpu_sheaves *pcs;
4783 struct slab_sheaf *main;
4784 unsigned int allocated = 0;
4785 unsigned int batch;
4786
4787 next_batch:
4788 if (!local_trylock(&s->cpu_sheaves->lock))
4789 return allocated;
4790
4791 pcs = this_cpu_ptr(s->cpu_sheaves);
4792
4793 if (unlikely(pcs->main->size == 0)) {
4794
4795 struct slab_sheaf *full;
4796 struct node_barn *barn;
4797
4798 if (unlikely(!cache_has_sheaves(s))) {
4799 local_unlock(&s->cpu_sheaves->lock);
4800 return allocated;
4801 }
4802
4803 if (pcs->spare && pcs->spare->size > 0) {
4804 swap(pcs->main, pcs->spare);
4805 goto do_alloc;
4806 }
4807
4808 barn = get_barn(s);
4809 if (!barn) {
4810 local_unlock(&s->cpu_sheaves->lock);
4811 return allocated;
4812 }
4813
4814 full = barn_replace_empty_sheaf(barn, pcs->main,
4815 gfpflags_allow_spinning(gfp));
4816
4817 if (full) {
4818 stat(s, BARN_GET);
4819 pcs->main = full;
4820 goto do_alloc;
4821 }
4822
4823 stat(s, BARN_GET_FAIL);
4824
4825 local_unlock(&s->cpu_sheaves->lock);
4826
4827 /*
4828 * Once full sheaves in barn are depleted, let the bulk
4829 * allocation continue from slab pages, otherwise we would just
4830 * be copying arrays of pointers twice.
4831 */
4832 return allocated;
4833 }
4834
4835 do_alloc:
4836
4837 main = pcs->main;
4838 batch = min(size, main->size);
4839
4840 main->size -= batch;
4841 memcpy(p, main->objects + main->size, batch * sizeof(void *));
4842
4843 local_unlock(&s->cpu_sheaves->lock);
4844
4845 stat_add(s, ALLOC_FASTPATH, batch);
4846
4847 allocated += batch;
4848
4849 if (batch < size) {
4850 p += batch;
4851 size -= batch;
4852 goto next_batch;
4853 }
4854
4855 return allocated;
4856 }
4857
4858
4859 /*
4860 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
4861 * have the fastpath folded into their functions. So no function call
4862 * overhead for requests that can be satisfied on the fastpath.
4863 *
4864 * The fastpath works by first trying to take an object from the percpu
4865 * sheaves.
4866 *
4867 * If that is not possible, __slab_alloc_node() is called for slow processing.
4868 */
4869 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
4870 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4871 {
4872 void *object;
4873 bool init = false;
4874
4875 s = slab_pre_alloc_hook(s, gfpflags);
4876 if (unlikely(!s))
4877 return NULL;
4878
4879 object = kfence_alloc(s, orig_size, gfpflags);
4880 if (unlikely(object))
4881 goto out;
4882
4883 object = alloc_from_pcs(s, gfpflags, node);
4884
4885 if (!object)
4886 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
4887
4888 maybe_wipe_obj_freeptr(s, object);
4889 init = slab_want_init_on_alloc(gfpflags, s);
4890
4891 out:
4892 /*
4893 * When init equals 'true', like for kzalloc() family, only
4894 * @orig_size bytes might be zeroed instead of s->object_size.
4895 * In case this fails due to memcg_slab_post_alloc_hook(),
4896 * object is set to NULL.
4897 */
4898 slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size);
4899
4900 return object;
4901 }
4902
4903 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
4904 {
4905 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
4906 s->object_size);
4907
4908 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4909
4910 return ret;
4911 }
4912 EXPORT_SYMBOL(kmem_cache_alloc_noprof);
4913
4914 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
4915 gfp_t gfpflags)
4916 {
4917 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
4918 s->object_size);
4919
4920 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4921
4922 return ret;
4923 }
4924 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
4925
4926 bool kmem_cache_charge(void *objp, gfp_t gfpflags)
4927 {
4928 if (!memcg_kmem_online())
4929 return true;
4930
4931 return memcg_slab_post_charge(objp, gfpflags);
4932 }
4933 EXPORT_SYMBOL(kmem_cache_charge);
4934
4935 /**
4936 * kmem_cache_alloc_node - Allocate an object on the specified node
4937 * @s: The cache to allocate from.
4938 * @gfpflags: See kmalloc().
4939 * @node: node number of the target node.
4940 *
4941 * Identical to kmem_cache_alloc but it will allocate memory on the given
4942 * node, which can improve the performance for cpu bound structures.
4943 *
4944 * Fallback to other node is possible if __GFP_THISNODE is not set.
4945 *
4946 * Return: pointer to the new object or %NULL in case of error
4947 */
4948 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
4949 {
4950 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
4951
4952 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
4953
4954 return ret;
4955 }
4956 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
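/*
 * Usage sketch (illustrative only; the cache and struct names are made up):
 *
 *	struct foo *f;
 *
 *	f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, cpu_to_node(cpu));
 *	if (!f)
 *		return -ENOMEM;
 *
 * Without __GFP_THISNODE the allocation may still be satisfied from a
 * different node when the requested node cannot supply the memory.
 */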
4957
4958 static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
4959 struct slab_sheaf *sheaf, gfp_t gfp)
4960 {
4961 gfp_t gfp_nomemalloc;
4962 int ret;
4963
4964 gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
4965 if (gfp_pfmemalloc_allowed(gfp))
4966 gfp_nomemalloc |= __GFP_NOWARN;
4967
4968 ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
4969
4970 if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
4971 return ret;
4972
4973 /*
4974 * If we are allowed to, refill the sheaf with pfmemalloc reserves, but
4975 * remember that, so the sheaf gets flushed when it's returned
4976 */
4977 ret = refill_sheaf(s, sheaf, gfp);
4978 sheaf->pfmemalloc = true;
4979
4980 return ret;
4981 }
4982
4983 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
4984 size_t size, void **p);
4985
4986 /*
4987 * Returns a sheaf that contains at least the requested number of objects.
4988 * When prefilling is needed, it is done with the given gfp flags.
4989 *
4990 * Returns NULL if sheaf allocation or prefilling failed.
4991 */
4992 struct slab_sheaf *
4993 kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
4994 {
4995 struct slub_percpu_sheaves *pcs;
4996 struct slab_sheaf *sheaf = NULL;
4997 struct node_barn *barn;
4998
4999 if (unlikely(!size))
5000 return NULL;
5001
5002 if (unlikely(size > s->sheaf_capacity)) {
5003
5004 sheaf = kzalloc_flex(*sheaf, objects, size, gfp);
5005 if (!sheaf)
5006 return NULL;
5007
5008 stat(s, SHEAF_PREFILL_OVERSIZE);
5009 sheaf->cache = s;
5010 sheaf->capacity = size;
5011
5012 /*
5013 * We do not need to care about pfmemalloc here because oversize
5014 * sheaves are always flushed and freed when returned
5015 */
5016 if (!__kmem_cache_alloc_bulk(s, gfp, size,
5017 &sheaf->objects[0])) {
5018 kfree(sheaf);
5019 return NULL;
5020 }
5021
5022 sheaf->size = size;
5023
5024 return sheaf;
5025 }
5026
5027 local_lock(&s->cpu_sheaves->lock);
5028 pcs = this_cpu_ptr(s->cpu_sheaves);
5029
5030 if (pcs->spare) {
5031 sheaf = pcs->spare;
5032 pcs->spare = NULL;
5033 stat(s, SHEAF_PREFILL_FAST);
5034 } else {
5035 barn = get_barn(s);
5036
5037 stat(s, SHEAF_PREFILL_SLOW);
5038 if (barn)
5039 sheaf = barn_get_full_or_empty_sheaf(barn);
5040 if (sheaf && sheaf->size)
5041 stat(s, BARN_GET);
5042 else
5043 stat(s, BARN_GET_FAIL);
5044 }
5045
5046 local_unlock(&s->cpu_sheaves->lock);
5047
5048
5049 if (!sheaf)
5050 sheaf = alloc_empty_sheaf(s, gfp);
5051
5052 if (sheaf) {
5053 sheaf->capacity = s->sheaf_capacity;
5054 sheaf->pfmemalloc = false;
5055
5056 if (sheaf->size < size &&
5057 __prefill_sheaf_pfmemalloc(s, sheaf, gfp)) {
5058 sheaf_flush_unused(s, sheaf);
5059 free_empty_sheaf(s, sheaf);
5060 sheaf = NULL;
5061 }
5062 }
5063
5064 return sheaf;
5065 }
5066
5067 /*
5068 * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
5069 *
5070 * If the sheaf cannot simply become the percpu spare sheaf, but there's space
5071 * for a full sheaf in the barn, we try to refill the sheaf back to the cache's
5072 * sheaf_capacity to avoid handling partially full sheaves.
5073 *
5074 * If the refill fails because gfp is e.g. GFP_NOWAIT, or the barn is full, the
5075 * sheaf is instead flushed and freed.
5076 */
5077 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
5078 struct slab_sheaf *sheaf)
5079 {
5080 struct slub_percpu_sheaves *pcs;
5081 struct node_barn *barn;
5082
5083 if (unlikely((sheaf->capacity != s->sheaf_capacity)
5084 || sheaf->pfmemalloc)) {
5085 sheaf_flush_unused(s, sheaf);
5086 kfree(sheaf);
5087 return;
5088 }
5089
5090 local_lock(&s->cpu_sheaves->lock);
5091 pcs = this_cpu_ptr(s->cpu_sheaves);
5092 barn = get_barn(s);
5093
5094 if (!pcs->spare) {
5095 pcs->spare = sheaf;
5096 sheaf = NULL;
5097 stat(s, SHEAF_RETURN_FAST);
5098 }
5099
5100 local_unlock(&s->cpu_sheaves->lock);
5101
5102 if (!sheaf)
5103 return;
5104
5105 stat(s, SHEAF_RETURN_SLOW);
5106
5107 /*
5108 * If the barn has too many full sheaves or we fail to refill the sheaf,
5109 * simply flush and free it.
5110 */
5111 if (!barn || data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
5112 refill_sheaf(s, sheaf, gfp)) {
5113 sheaf_flush_unused(s, sheaf);
5114 free_empty_sheaf(s, sheaf);
5115 return;
5116 }
5117
5118 barn_put_full_sheaf(barn, sheaf);
5119 stat(s, BARN_PUT);
5120 }
5121
5122 /*
5123 * Refill a sheaf previously returned by kmem_cache_prefill_sheaf to at least
5124 * the given size.
5125 *
5126 * Return: 0 on success. The sheaf will contain at least @size objects.
5127 * The sheaf might have been replaced with a new one if more than
5128 * sheaf->capacity objects are requested.
5129 *
5130 * Return: -ENOMEM on failure. Some objects might have been added to the sheaf
5131 * but the sheaf will not be replaced.
5132 *
5133 * In practice we always refill to the sheaf's full capacity.
5134 */
5135 int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
5136 struct slab_sheaf **sheafp, unsigned int size)
5137 {
5138 struct slab_sheaf *sheaf;
5139
5140 /*
5141 * TODO: do we want to support *sheaf == NULL to be equivalent of
5142 * kmem_cache_prefill_sheaf() ?
5143 */
5144 if (!sheafp || !(*sheafp))
5145 return -EINVAL;
5146
5147 sheaf = *sheafp;
5148 if (sheaf->size >= size)
5149 return 0;
5150
5151 if (likely(sheaf->capacity >= size)) {
5152 if (likely(sheaf->capacity == s->sheaf_capacity))
5153 return __prefill_sheaf_pfmemalloc(s, sheaf, gfp);
5154
5155 if (!__kmem_cache_alloc_bulk(s, gfp, sheaf->capacity - sheaf->size,
5156 &sheaf->objects[sheaf->size])) {
5157 return -ENOMEM;
5158 }
5159 sheaf->size = sheaf->capacity;
5160
5161 return 0;
5162 }
5163
5164 /*
5165 * We had a regular sized sheaf and need an oversize one, or we had an
5166 * oversize one already but need a larger one now.
5167 * This should be a very rare path so let's not complicate it.
5168 */
5169 sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
5170 if (!sheaf)
5171 return -ENOMEM;
5172
5173 kmem_cache_return_sheaf(s, gfp, *sheafp);
5174 *sheafp = sheaf;
5175 return 0;
5176 }
5177
5178 /*
5179 * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
5180 *
5181 * Guaranteed not to fail as many allocations as was the requested size.
5182 * After the sheaf is emptied, it fails - no fallback to the slab cache itself.
5183 *
5184 * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT
5185 * memcg charging is forced over limit if necessary, to avoid failure.
5186 *
5187 * It is possible that the allocation comes from kfence and then the sheaf
5188 * size is not decreased.
5189 */
5190 void *
5191 kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
5192 struct slab_sheaf *sheaf)
5193 {
5194 void *ret = NULL;
5195 bool init;
5196
5197 if (sheaf->size == 0)
5198 goto out;
5199
5200 ret = kfence_alloc(s, s->object_size, gfp);
5201
5202 if (likely(!ret))
5203 ret = sheaf->objects[--sheaf->size];
5204
5205 init = slab_want_init_on_alloc(gfp, s);
5206
5207 /* add __GFP_NOFAIL to force successful memcg charging */
5208 slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);
5209 out:
5210 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
5211
5212 return ret;
5213 }
5214
5215 unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
5216 {
5217 return sheaf->size;
5218 }
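/*
 * Usage sketch for the prefilled sheaf API above (illustrative only; the
 * cache, variable names and object count are made up, and the
 * kmem_cache_alloc_from_sheaf() wrapper of the _noprof variant is assumed):
 *
 *	struct slab_sheaf *sheaf;
 *	void *obj;
 *
 *	sheaf = kmem_cache_prefill_sheaf(cache, GFP_KERNEL, 16);
 *	if (!sheaf)
 *		return -ENOMEM;
 *
 *	// later, e.g. in a context that must not fail, up to 16
 *	// allocations from the sheaf are guaranteed to succeed
 *	obj = kmem_cache_alloc_from_sheaf(cache, __GFP_ZERO, sheaf);
 *
 *	// optionally top the sheaf back up before reusing it,
 *	// then hand it back to the cache
 *	kmem_cache_refill_sheaf(cache, GFP_KERNEL, &sheaf, 16);
 *	kmem_cache_return_sheaf(cache, GFP_KERNEL, sheaf);
 *
 * kmem_cache_sheaf_size() reports how many prefilled objects are left.
 */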
5219 /*
5220 * To avoid unnecessary overhead, we pass through large allocation requests
5221 * directly to the page allocator. We use __GFP_COMP, because we will need to
5222 * know the allocation order to free the pages properly in kfree.
5223 */
5224 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
5225 {
5226 struct page *page;
5227 void *ptr = NULL;
5228 unsigned int order = get_order(size);
5229
5230 if (unlikely(flags & GFP_SLAB_BUG_MASK))
5231 flags = kmalloc_fix_flags(flags);
5232
5233 flags |= __GFP_COMP;
5234
5235 if (node == NUMA_NO_NODE)
5236 page = alloc_frozen_pages_noprof(flags, order);
5237 else
5238 page = __alloc_frozen_pages_noprof(flags, order, node, NULL);
5239
5240 if (page) {
5241 ptr = page_address(page);
5242 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
5243 PAGE_SIZE << order);
5244 __SetPageLargeKmalloc(page);
5245 }
5246
5247 ptr = kasan_kmalloc_large(ptr, size, flags);
5248 /* As ptr might get tagged, call kmemleak hook after KASAN. */
5249 kmemleak_alloc(ptr, size, 1, flags);
5250 kmsan_kmalloc_large(ptr, size, flags);
5251
5252 return ptr;
5253 }
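/*
 * Illustrative example (assuming a typical configuration with 4K pages where
 * KMALLOC_MAX_CACHE_SIZE is two pages): a kmalloc(16384, GFP_KERNEL) call
 * bypasses the kmalloc caches and ends up here with get_order(16384) == 2,
 * i.e. one order-2 compound page, marked as a large kmalloc page so that
 * kfree() can later derive the order from the page again.
 */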
5254
5255 void *__kmalloc_large_noprof(size_t size, gfp_t flags)
5256 {
5257 void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
5258
5259 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
5260 flags, NUMA_NO_NODE);
5261 return ret;
5262 }
5263 EXPORT_SYMBOL(__kmalloc_large_noprof);
5264
5265 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
5266 {
5267 void *ret = ___kmalloc_large_node(size, flags, node);
5268
5269 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
5270 flags, node);
5271 return ret;
5272 }
5273 EXPORT_SYMBOL(__kmalloc_large_node_noprof);
5274
5275 static __always_inline
5276 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
5277 unsigned long caller)
5278 {
5279 struct kmem_cache *s;
5280 void *ret;
5281
5282 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
5283 ret = __kmalloc_large_node_noprof(size, flags, node);
5284 trace_kmalloc(caller, ret, size,
5285 PAGE_SIZE << get_order(size), flags, node);
5286 return ret;
5287 }
5288
5289 if (unlikely(!size))
5290 return ZERO_SIZE_PTR;
5291
5292 s = kmalloc_slab(size, b, flags, caller);
5293
5294 ret = slab_alloc_node(s, NULL, flags, node, caller, size);
5295 ret = kasan_kmalloc(s, ret, size, flags);
5296 trace_kmalloc(caller, ret, size, s->size, flags, node);
5297 return ret;
5298 }
5299 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
5300 {
5301 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
5302 }
5303 EXPORT_SYMBOL(__kmalloc_node_noprof);
5304
5305 void *__kmalloc_noprof(size_t size, gfp_t flags)
5306 {
5307 return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
5308 }
5309 EXPORT_SYMBOL(__kmalloc_noprof);
5310
5311 /**
5312 * kmalloc_nolock - Allocate an object of given size from any context.
5313 * @size: size to allocate
5314 * @gfp_flags: GFP flags. Only __GFP_ACCOUNT, __GFP_ZERO, __GFP_NO_OBJ_EXT
5315 * allowed.
5316 * @node: node number of the target node.
5317 *
5318 * Return: pointer to the new object or NULL in case of error.
5319 * NULL does not mean EBUSY or EAGAIN. It means ENOMEM.
5320 * There is no reason to call it again and expect !NULL.
5321 */
5322 void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
5323 {
5324 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_NOMEMALLOC | gfp_flags;
5325 struct kmem_cache *s;
5326 bool can_retry = true;
5327 void *ret;
5328
5329 VM_WARN_ON_ONCE(gfp_flags & ~(__GFP_ACCOUNT | __GFP_ZERO |
5330 __GFP_NO_OBJ_EXT));
5331
5332 if (unlikely(!size))
5333 return ZERO_SIZE_PTR;
5334
5335 /*
5336 * See the comment for the same check in
5337 * alloc_frozen_pages_nolock_noprof()
5338 */
5339 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
5340 return NULL;
5341
5342 /* On UP, spin_trylock() always succeeds even when it is locked */
5343 if (!IS_ENABLED(CONFIG_SMP) && in_nmi())
5344 return NULL;
5345
5346 retry:
5347 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
5348 return NULL;
5349 s = kmalloc_slab(size, NULL, alloc_gfp, _RET_IP_);
5350
5351 if (!(s->flags & __CMPXCHG_DOUBLE) && !kmem_cache_debug(s))
5352 /*
5353 * kmalloc_nolock() is not supported on architectures that
5354 * don't implement cmpxchg16b and thus need slab_lock()
5355 * which could be preempted by an NMI.
5356 * But debug caches don't use that and only rely on
5357 * kmem_cache_node->list_lock, so kmalloc_nolock() can attempt
5358 * to allocate from debug caches by
5359 * spin_trylock_irqsave(&n->list_lock, ...)
5360 */
5361 return NULL;
5362
5363 ret = alloc_from_pcs(s, alloc_gfp, node);
5364 if (ret)
5365 goto success;
5366
5367 /*
5368 * Do not call slab_alloc_node(), since trylock mode isn't
5369 * compatible with slab_pre_alloc_hook/should_failslab and
5370 * kfence_alloc. Hence call __slab_alloc_node() (at most twice)
5371 * and slab_post_alloc_hook() directly.
5372 */
5373 ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size);
5374
5375 /*
5376 * It's possible we failed due to trylock as we preempted someone with
5377 * the sheaves locked, and the list_lock is also held by another cpu.
5378 * But it should be rare that multiple kmalloc buckets would have
5379 * sheaves locked, so try a larger one.
5380 */
5381 if (!ret && can_retry) {
5382 /* pick the next kmalloc bucket */
5383 size = s->object_size + 1;
5384 /*
5385 * Another alternative is to
5386 * if (memcg) alloc_gfp &= ~__GFP_ACCOUNT;
5387 * else if (!memcg) alloc_gfp |= __GFP_ACCOUNT;
5388 * to retry from bucket of the same size.
5389 */
5390 can_retry = false;
5391 goto retry;
5392 }
5393
5394 success:
5395 maybe_wipe_obj_freeptr(s, ret);
5396 slab_post_alloc_hook(s, NULL, alloc_gfp, 1, &ret,
5397 slab_want_init_on_alloc(alloc_gfp, s), size);
5398
5399 ret = kasan_kmalloc(s, ret, size, alloc_gfp);
5400 return ret;
5401 }
5402 EXPORT_SYMBOL_GPL(kmalloc_nolock_noprof);
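
/*
 * Illustrative usage sketch (not part of the allocator): a caller running in
 * NMI/IRQ context or under a raw_spinlock_t can pair kmalloc_nolock() with
 * kfree_nolock(). struct trace_node and record_event() are hypothetical names
 * assumed only for this example.
 *
 *	struct trace_node {
 *		u64 timestamp;
 *		struct llist_node llnode;
 *	};
 *
 *	static void record_event(struct llist_head *log)
 *	{
 *		struct trace_node *n;
 *
 *		n = kmalloc_nolock(sizeof(*n), __GFP_ZERO, NUMA_NO_NODE);
 *		if (!n)
 *			return;	// ENOMEM, retrying will not help
 *		n->timestamp = ktime_get_mono_fast_ns();
 *		llist_add(&n->llnode, log);
 *	}
 *
 * If the objects may also be released from a restricted context, use
 * kfree_nolock() (see below) for freeing them.
 */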
5403
5404 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
5405 int node, unsigned long caller)
5406 {
5407 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
5408
5409 }
5410 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
5411
5412 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
5413 {
5414 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
5415 _RET_IP_, size);
5416
5417 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
5418
5419 ret = kasan_kmalloc(s, ret, size, gfpflags);
5420 return ret;
5421 }
5422 EXPORT_SYMBOL(__kmalloc_cache_noprof);
5423
5424 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
5425 int node, size_t size)
5426 {
5427 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
5428
5429 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
5430
5431 ret = kasan_kmalloc(s, ret, size, gfpflags);
5432 return ret;
5433 }
5434 EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
5435
5436 static noinline void free_to_partial_list(
5437 struct kmem_cache *s, struct slab *slab,
5438 void *head, void *tail, int bulk_cnt,
5439 unsigned long addr)
5440 {
5441 struct kmem_cache_node *n = get_node(s, slab_nid(slab));
5442 struct slab *slab_free = NULL;
5443 int cnt = bulk_cnt;
5444 unsigned long flags;
5445 depot_stack_handle_t handle = 0;
5446
5447 /*
5448 * We cannot use GFP_NOWAIT as there are callsites where waking up
5449 * kswapd could deadlock
5450 */
5451 if (s->flags & SLAB_STORE_USER)
5452 handle = set_track_prepare(__GFP_NOWARN);
5453
5454 spin_lock_irqsave(&n->list_lock, flags);
5455
5456 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
5457 void *prior = slab->freelist;
5458
5459 /* Perform the actual freeing while we still hold the locks */
5460 slab->inuse -= cnt;
5461 set_freepointer(s, tail, prior);
5462 slab->freelist = head;
5463
5464 /*
5465 * If the slab is empty and the node's partial list is full, it
5466 * should be discarded regardless of whether it was on the full or
5467 * the partial list.
5468 */
5469 if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
5470 slab_free = slab;
5471
5472 if (!prior) {
5473 /* was on full list */
5474 remove_full(s, n, slab);
5475 if (!slab_free) {
5476 add_partial(n, slab, ADD_TO_TAIL);
5477 stat(s, FREE_ADD_PARTIAL);
5478 }
5479 } else if (slab_free) {
5480 remove_partial(n, slab);
5481 stat(s, FREE_REMOVE_PARTIAL);
5482 }
5483 }
5484
5485 if (slab_free) {
5486 /*
5487 * Update the counters while still holding n->list_lock to
5488 * prevent spurious validation warnings
5489 */
5490 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
5491 }
5492
5493 spin_unlock_irqrestore(&n->list_lock, flags);
5494
5495 if (slab_free) {
5496 stat(s, FREE_SLAB);
5497 free_slab(s, slab_free);
5498 }
5499 }
5500
5501 /*
5502 * Slow path handling. This may still be called frequently since objects
5503 * have a longer lifetime than the cpu slabs in most processing loads.
5504 *
5505 * So we still attempt to reduce cache line usage. Just take the slab
5506 * lock and free the item. If there is no additional partial slab
5507 * handling required then we can return immediately.
5508 */
5509 static void __slab_free(struct kmem_cache *s, struct slab *slab,
5510 void *head, void *tail, int cnt,
5511 unsigned long addr)
5512
5513 {
5514 bool was_full;
5515 struct freelist_counters old, new;
5516 struct kmem_cache_node *n = NULL;
5517 unsigned long flags;
5518 bool on_node_partial;
5519
5520 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
5521 free_to_partial_list(s, slab, head, tail, cnt, addr);
5522 return;
5523 }
5524
5525 do {
5526 if (unlikely(n)) {
5527 spin_unlock_irqrestore(&n->list_lock, flags);
5528 n = NULL;
5529 }
5530
5531 old.freelist = slab->freelist;
5532 old.counters = slab->counters;
5533
5534 was_full = (old.freelist == NULL);
5535
5536 set_freepointer(s, tail, old.freelist);
5537
5538 new.freelist = head;
5539 new.counters = old.counters;
5540 new.inuse -= cnt;
5541
5542 /*
5543 * Might need to be taken off (due to becoming empty) or added
5544 * to (due to not being full anymore) the partial list.
5545 * Unless it's frozen.
5546 */
5547 if (!new.inuse || was_full) {
5548
5549 n = get_node(s, slab_nid(slab));
5550 /*
5551 * Speculatively acquire the list_lock.
5552 * If the cmpxchg does not succeed then we may
5553 * drop the list_lock without any processing.
5554 *
5555 * Otherwise the list_lock will synchronize with
5556 * other processors updating the list of slabs.
5557 */
5558 spin_lock_irqsave(&n->list_lock, flags);
5559
5560 on_node_partial = slab_test_node_partial(slab);
5561 }
5562
5563 } while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
5564
5565 if (likely(!n)) {
5566 /*
5567 * We didn't take the list_lock because the slab was already on
5568 * the partial list and will remain there.
5569 */
5570 return;
5571 }
5572
5573 /*
5574 * This slab was partially empty but not on the per-node partial list,
5575 * in which case we shouldn't manipulate its list, just return.
5576 */
5577 if (!was_full && !on_node_partial) {
5578 spin_unlock_irqrestore(&n->list_lock, flags);
5579 return;
5580 }
5581
5582 /*
5583 * If the slab became empty, should we add/keep it on the partial list,
5584 * or do we already have enough partial slabs?
5585 */
5586 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
5587 goto slab_empty;
5588
5589 /*
5590 * Objects left in the slab. If it was not on the partial list before
5591 * then add it.
5592 */
5593 if (unlikely(was_full)) {
5594 add_partial(n, slab, ADD_TO_TAIL);
5595 stat(s, FREE_ADD_PARTIAL);
5596 }
5597 spin_unlock_irqrestore(&n->list_lock, flags);
5598 return;
5599
5600 slab_empty:
5601 /*
5602 * The slab could have a single object and thus go from full to empty in
5603 * a single free, but more likely it was on the partial list. Remove it.
5604 */
5605 if (likely(!was_full)) {
5606 remove_partial(n, slab);
5607 stat(s, FREE_REMOVE_PARTIAL);
5608 }
5609
5610 spin_unlock_irqrestore(&n->list_lock, flags);
5611 stat(s, FREE_SLAB);
5612 discard_slab(s, slab);
5613 }
5614
5615 /*
5616 * pcs is locked. We should have gotten rid of the spare sheaf and obtained an
5617 * empty sheaf, while the main sheaf is full. We want to install the empty sheaf
5618 * as the main sheaf, and make the current main sheaf the spare sheaf.
5619 *
5620 * However due to having relinquished the cpu_sheaves lock when obtaining
5621 * the empty sheaf, we need to handle some unlikely but possible cases.
5622 *
5623 * If we put any sheaf to barn here, it's because we were interrupted or have
5624 * been migrated to a different cpu, which should be rare enough so just ignore
5625 * the barn's limits to simplify the handling.
5626 *
5627 * An alternative scenario that gets us here is when we fail
5628 * barn_replace_full_sheaf(), because there's no empty sheaf available in the
5629 * barn, so we had to allocate it by alloc_empty_sheaf(). But because we saw the
5630 * limit on full sheaves was not exceeded, we assume it didn't change and just
5631 * put the full sheaf there.
5632 */
5633 static void __pcs_install_empty_sheaf(struct kmem_cache *s,
5634 struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty,
5635 struct node_barn *barn)
5636 {
5637 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
5638
5639 /* This is what we expect to find if nobody interrupted us. */
5640 if (likely(!pcs->spare)) {
5641 pcs->spare = pcs->main;
5642 pcs->main = empty;
5643 return;
5644 }
5645
5646 /*
5647 * Unlikely because if the main sheaf had space, we would have just
5648 * freed to it. Get rid of our empty sheaf.
5649 */
5650 if (pcs->main->size < s->sheaf_capacity) {
5651 barn_put_empty_sheaf(barn, empty);
5652 return;
5653 }
5654
5655 /* Also unlikely for the same reason */
5656 if (pcs->spare->size < s->sheaf_capacity) {
5657 swap(pcs->main, pcs->spare);
5658 barn_put_empty_sheaf(barn, empty);
5659 return;
5660 }
5661
5662 /*
5663 * We probably failed barn_replace_full_sheaf() due to no empty sheaf
5664 * available there, but we allocated one, so finish the job.
5665 */
5666 barn_put_full_sheaf(barn, pcs->main);
5667 stat(s, BARN_PUT);
5668 pcs->main = empty;
5669 }
5670
5671 /*
5672 * Replace the full main sheaf with a (at least partially) empty sheaf.
5673 *
5674 * Must be called with the cpu_sheaves local lock locked. If successful, returns
5675 * the pcs pointer and the local lock locked (possibly on a different cpu than
5676 * initially called). If not successful, returns NULL and the local lock
5677 * unlocked.
5678 */
5679 static struct slub_percpu_sheaves *
5680 __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
5681 bool allow_spin)
5682 {
5683 struct slab_sheaf *empty;
5684 struct node_barn *barn;
5685 bool put_fail;
5686
5687 restart:
5688 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
5689
5690 /* Bootstrap or debug cache, back off */
5691 if (unlikely(!cache_has_sheaves(s))) {
5692 local_unlock(&s->cpu_sheaves->lock);
5693 return NULL;
5694 }
5695
5696 barn = get_barn(s);
5697 if (!barn) {
5698 local_unlock(&s->cpu_sheaves->lock);
5699 return NULL;
5700 }
5701
5702 put_fail = false;
5703
5704 if (!pcs->spare) {
5705 empty = barn_get_empty_sheaf(barn, allow_spin);
5706 if (empty) {
5707 pcs->spare = pcs->main;
5708 pcs->main = empty;
5709 return pcs;
5710 }
5711 goto alloc_empty;
5712 }
5713
5714 if (pcs->spare->size < s->sheaf_capacity) {
5715 swap(pcs->main, pcs->spare);
5716 return pcs;
5717 }
5718
5719 empty = barn_replace_full_sheaf(barn, pcs->main, allow_spin);
5720
5721 if (!IS_ERR(empty)) {
5722 stat(s, BARN_PUT);
5723 pcs->main = empty;
5724 return pcs;
5725 }
5726
5727 /* sheaf_flush_unused() doesn't support !allow_spin */
5728 if (PTR_ERR(empty) == -E2BIG && allow_spin) {
5729 /* Since we got here, spare exists and is full */
5730 struct slab_sheaf *to_flush = pcs->spare;
5731
5732 stat(s, BARN_PUT_FAIL);
5733
5734 pcs->spare = NULL;
5735 local_unlock(&s->cpu_sheaves->lock);
5736
5737 sheaf_flush_unused(s, to_flush);
5738 empty = to_flush;
5739 goto got_empty;
5740 }
5741
5742 /*
5743 * We could not replace the full sheaf because the barn had no empty
5744 * sheaves. We can still allocate an empty sheaf and put the full one in
5745 * __pcs_install_empty_sheaf(), but if we fail to allocate it,
5746 * make sure to count the failure.
5747 */
5748 put_fail = true;
5749
5750 alloc_empty:
5751 local_unlock(&s->cpu_sheaves->lock);
5752
5753 /*
5754 * alloc_empty_sheaf() doesn't support !allow_spin and it's
5755 * easier to fall back to freeing directly without sheaves
5756 * than add the support (and to sheaf_flush_unused() above)
5757 */
5758 if (!allow_spin)
5759 return NULL;
5760
5761 empty = alloc_empty_sheaf(s, GFP_NOWAIT);
5762 if (empty)
5763 goto got_empty;
5764
5765 if (put_fail)
5766 stat(s, BARN_PUT_FAIL);
5767
5768 if (!sheaf_try_flush_main(s))
5769 return NULL;
5770
5771 if (!local_trylock(&s->cpu_sheaves->lock))
5772 return NULL;
5773
5774 pcs = this_cpu_ptr(s->cpu_sheaves);
5775
5776 /*
5777 * we flushed the main sheaf so it should be empty now,
5778 * but in case we got preempted or migrated, we need to
5779 * check again
5780 */
5781 if (pcs->main->size == s->sheaf_capacity)
5782 goto restart;
5783
5784 return pcs;
5785
5786 got_empty:
5787 if (!local_trylock(&s->cpu_sheaves->lock)) {
5788 barn_put_empty_sheaf(barn, empty);
5789 return NULL;
5790 }
5791
5792 pcs = this_cpu_ptr(s->cpu_sheaves);
5793 __pcs_install_empty_sheaf(s, pcs, empty, barn);
5794
5795 return pcs;
5796 }
5797
5798 /*
5799 * Free an object to the percpu sheaves.
5800 * The object is expected to have passed slab_free_hook() already.
5801 */
5802 static __fastpath_inline
5803 bool free_to_pcs(struct kmem_cache *s, void *object, bool allow_spin)
5804 {
5805 struct slub_percpu_sheaves *pcs;
5806
5807 if (!local_trylock(&s->cpu_sheaves->lock))
5808 return false;
5809
5810 pcs = this_cpu_ptr(s->cpu_sheaves);
5811
5812 if (unlikely(pcs->main->size == s->sheaf_capacity)) {
5813
5814 pcs = __pcs_replace_full_main(s, pcs, allow_spin);
5815 if (unlikely(!pcs))
5816 return false;
5817 }
5818
5819 pcs->main->objects[pcs->main->size++] = object;
5820
5821 local_unlock(&s->cpu_sheaves->lock);
5822
5823 stat(s, FREE_FASTPATH);
5824
5825 return true;
5826 }
5827
5828 static void rcu_free_sheaf(struct rcu_head *head)
5829 {
5830 struct slab_sheaf *sheaf;
5831 struct node_barn *barn = NULL;
5832 struct kmem_cache *s;
5833
5834 sheaf = container_of(head, struct slab_sheaf, rcu_head);
5835
5836 s = sheaf->cache;
5837
5838 /*
5839 * This may remove some objects due to slab_free_hook() returning false,
5840 * so that the sheaf might no longer be completely full. But it's easier
5841 * to handle it as full (unless it became completely empty), as the code
5842 * handles it fine. The only downside is that the sheaf will serve fewer
5843 * allocations when reused. This only happens due to debugging, which is a
5844 * performance hit anyway.
5845 *
5846 * If it returns true, there was at least one object from pfmemalloc
5847 * slab so simply flush everything.
5848 */
5849 if (__rcu_free_sheaf_prepare(s, sheaf))
5850 goto flush;
5851
5852 barn = get_barn_node(s, sheaf->node);
5853 if (!barn)
5854 goto flush;
5855
5856 /* due to slab_free_hook() */
5857 if (unlikely(sheaf->size == 0))
5858 goto empty;
5859
5860 /*
5861 * Checking nr_full/nr_empty outside lock avoids contention in case the
5862 * barn is at the respective limit. Due to the race we might go over the
5863 * limit but that should be rare and harmless.
5864 */
5865
5866 if (data_race(barn->nr_full) < MAX_FULL_SHEAVES) {
5867 stat(s, BARN_PUT);
5868 barn_put_full_sheaf(barn, sheaf);
5869 return;
5870 }
5871
5872 flush:
5873 stat(s, BARN_PUT_FAIL);
5874 sheaf_flush_unused(s, sheaf);
5875
5876 empty:
5877 if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
5878 barn_put_empty_sheaf(barn, sheaf);
5879 return;
5880 }
5881
5882 free_empty_sheaf(s, sheaf);
5883 }
5884
5885 /*
5886 * kvfree_call_rcu() can be called while holding a raw_spinlock_t. Since
5887 * __kfree_rcu_sheaf() may acquire a spinlock_t (sleeping lock on PREEMPT_RT),
5888 * this would violate lock nesting rules. Therefore, kvfree_call_rcu() avoids
5889 * this problem by bypassing the sheaves layer entirely on PREEMPT_RT.
5890 *
5891 * However, lockdep still complains that it is invalid to acquire spinlock_t
5892 * while holding raw_spinlock_t, even on !PREEMPT_RT where spinlock_t is a
5893 * spinning lock. Tell lockdep that acquiring spinlock_t is valid here
5894 * by temporarily raising the wait-type to LD_WAIT_CONFIG.
5895 */
5896 static DEFINE_WAIT_OVERRIDE_MAP(kfree_rcu_sheaf_map, LD_WAIT_CONFIG);
5897
5898 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
5899 {
5900 struct slub_percpu_sheaves *pcs;
5901 struct slab_sheaf *rcu_sheaf;
5902
5903 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
5904 return false;
5905
5906 lock_map_acquire_try(&kfree_rcu_sheaf_map);
5907
5908 if (!local_trylock(&s->cpu_sheaves->lock))
5909 goto fail;
5910
5911 pcs = this_cpu_ptr(s->cpu_sheaves);
5912
5913 if (unlikely(!pcs->rcu_free)) {
5914
5915 struct slab_sheaf *empty;
5916 struct node_barn *barn;
5917
5918 /* Bootstrap or debug cache, fall back */
5919 if (unlikely(!cache_has_sheaves(s))) {
5920 local_unlock(&s->cpu_sheaves->lock);
5921 goto fail;
5922 }
5923
5924 if (pcs->spare && pcs->spare->size == 0) {
5925 pcs->rcu_free = pcs->spare;
5926 pcs->spare = NULL;
5927 goto do_free;
5928 }
5929
5930 barn = get_barn(s);
5931 if (!barn) {
5932 local_unlock(&s->cpu_sheaves->lock);
5933 goto fail;
5934 }
5935
5936 empty = barn_get_empty_sheaf(barn, true);
5937
5938 if (empty) {
5939 pcs->rcu_free = empty;
5940 goto do_free;
5941 }
5942
5943 local_unlock(&s->cpu_sheaves->lock);
5944
5945 empty = alloc_empty_sheaf(s, GFP_NOWAIT);
5946
5947 if (!empty)
5948 goto fail;
5949
5950 if (!local_trylock(&s->cpu_sheaves->lock)) {
5951 barn_put_empty_sheaf(barn, empty);
5952 goto fail;
5953 }
5954
5955 pcs = this_cpu_ptr(s->cpu_sheaves);
5956
5957 if (unlikely(pcs->rcu_free))
5958 barn_put_empty_sheaf(barn, empty);
5959 else
5960 pcs->rcu_free = empty;
5961 }
5962
5963 do_free:
5964
5965 rcu_sheaf = pcs->rcu_free;
5966
5967 /*
5968 * Since we flush immediately when size reaches capacity, we never reach
5969 * this with size already at capacity, so no OOB write is possible.
5970 */
5971 rcu_sheaf->objects[rcu_sheaf->size++] = obj;
5972
5973 if (likely(rcu_sheaf->size < s->sheaf_capacity)) {
5974 rcu_sheaf = NULL;
5975 } else {
5976 pcs->rcu_free = NULL;
5977 rcu_sheaf->node = numa_node_id();
5978 }
5979
5980 /*
5981 * we flush before local_unlock to make sure a racing
5982 * flush_all_rcu_sheaves() doesn't miss this sheaf
5983 */
5984 if (rcu_sheaf)
5985 call_rcu(&rcu_sheaf->rcu_head, rcu_free_sheaf);
5986
5987 local_unlock(&s->cpu_sheaves->lock);
5988
5989 stat(s, FREE_RCU_SHEAF);
5990 lock_map_release(&kfree_rcu_sheaf_map);
5991 return true;
5992
5993 fail:
5994 stat(s, FREE_RCU_SHEAF_FAIL);
5995 lock_map_release(&kfree_rcu_sheaf_map);
5996 return false;
5997 }
5998
5999 static __always_inline bool can_free_to_pcs(struct slab *slab)
6000 {
6001 int slab_node;
6002 int numa_node;
6003
6004 if (!IS_ENABLED(CONFIG_NUMA))
6005 goto check_pfmemalloc;
6006
6007 slab_node = slab_nid(slab);
6008
6009 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6010 /*
6011 * numa_mem_id() points to the closest node with memory so only allow
6012 * objects from that node to the percpu sheaves
6013 */
6014 numa_node = numa_mem_id();
6015
6016 if (likely(slab_node == numa_node))
6017 goto check_pfmemalloc;
6018 #else
6019
6020 /*
6021 * numa_mem_id() is only a wrapper around numa_node_id(), i.e. the node
6022 * this cpu belongs to, but that might be a memoryless node anyway. We
6023 * don't know what the closest node is.
6024 */
6025 numa_node = numa_node_id();
6026
6027 /* freed object is from this cpu's node, proceed */
6028 if (likely(slab_node == numa_node))
6029 goto check_pfmemalloc;
6030
6031 /*
6032 * Freed object isn't from this cpu's node, but that node is memoryless
6033 * or only has ZONE_MOVABLE memory, which slab cannot allocate from.
6034 * Proceed as it's better to cache remote objects than falling back to
6035 * the slowpath for everything. The allocation side can never obtain
6036 * a local object anyway, if none exist. We don't have numa_mem_id() to
6037 * point to the closest node as we would on a proper memoryless node
6038 * setup.
6039 */
6040 if (unlikely(!node_state(numa_node, N_NORMAL_MEMORY)))
6041 goto check_pfmemalloc;
6042 #endif
6043
6044 return false;
6045
6046 check_pfmemalloc:
6047 return likely(!slab_test_pfmemalloc(slab));
6048 }
6049
6050 /*
6051 * Bulk free objects to the percpu sheaves.
6052 * Unlike free_to_pcs() this includes the calls to all necessary hooks
6053 * and the fallback to freeing to slab pages.
6054 */
6055 static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
6056 {
6057 struct slub_percpu_sheaves *pcs;
6058 struct slab_sheaf *main, *empty;
6059 bool init = slab_want_init_on_free(s);
6060 unsigned int batch, i = 0;
6061 struct node_barn *barn;
6062 void *remote_objects[PCS_BATCH_MAX];
6063 unsigned int remote_nr = 0;
6064
6065 next_remote_batch:
6066 while (i < size) {
6067 struct slab *slab = virt_to_slab(p[i]);
6068
6069 memcg_slab_free_hook(s, slab, p + i, 1);
6070 alloc_tagging_slab_free_hook(s, slab, p + i, 1);
6071
6072 if (unlikely(!slab_free_hook(s, p[i], init, false))) {
6073 p[i] = p[--size];
6074 continue;
6075 }
6076
6077 if (unlikely(!can_free_to_pcs(slab))) {
6078 remote_objects[remote_nr] = p[i];
6079 p[i] = p[--size];
6080 if (++remote_nr >= PCS_BATCH_MAX)
6081 goto flush_remote;
6082 continue;
6083 }
6084
6085 i++;
6086 }
6087
6088 if (!size)
6089 goto flush_remote;
6090
6091 next_batch:
6092 if (!local_trylock(&s->cpu_sheaves->lock))
6093 goto fallback;
6094
6095 pcs = this_cpu_ptr(s->cpu_sheaves);
6096
6097 if (likely(pcs->main->size < s->sheaf_capacity))
6098 goto do_free;
6099
6100 barn = get_barn(s);
6101 if (!barn)
6102 goto no_empty;
6103
6104 if (!pcs->spare) {
6105 empty = barn_get_empty_sheaf(barn, true);
6106 if (!empty)
6107 goto no_empty;
6108
6109 pcs->spare = pcs->main;
6110 pcs->main = empty;
6111 goto do_free;
6112 }
6113
6114 if (pcs->spare->size < s->sheaf_capacity) {
6115 swap(pcs->main, pcs->spare);
6116 goto do_free;
6117 }
6118
6119 empty = barn_replace_full_sheaf(barn, pcs->main, true);
6120 if (IS_ERR(empty)) {
6121 stat(s, BARN_PUT_FAIL);
6122 goto no_empty;
6123 }
6124
6125 stat(s, BARN_PUT);
6126 pcs->main = empty;
6127
6128 do_free:
6129 main = pcs->main;
6130 batch = min(size, s->sheaf_capacity - main->size);
6131
6132 memcpy(main->objects + main->size, p, batch * sizeof(void *));
6133 main->size += batch;
6134
6135 local_unlock(&s->cpu_sheaves->lock);
6136
6137 stat_add(s, FREE_FASTPATH, batch);
6138
6139 if (batch < size) {
6140 p += batch;
6141 size -= batch;
6142 goto next_batch;
6143 }
6144
6145 if (remote_nr)
6146 goto flush_remote;
6147
6148 return;
6149
6150 no_empty:
6151 local_unlock(&s->cpu_sheaves->lock);
6152
6153 /*
6154 * if we depleted all empty sheaves in the barn or there are too
6155 * many full sheaves, free the rest to slab pages
6156 */
6157 fallback:
6158 __kmem_cache_free_bulk(s, size, p);
6159 stat_add(s, FREE_SLOWPATH, size);
6160
6161 flush_remote:
6162 if (remote_nr) {
6163 __kmem_cache_free_bulk(s, remote_nr, &remote_objects[0]);
6164 stat_add(s, FREE_SLOWPATH, remote_nr);
6165 if (i < size) {
6166 remote_nr = 0;
6167 goto next_remote_batch;
6168 }
6169 }
6170 }
6171
6172 struct defer_free {
6173 struct llist_head objects;
6174 struct irq_work work;
6175 };
6176
6177 static void free_deferred_objects(struct irq_work *work);
6178
6179 static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = {
6180 .objects = LLIST_HEAD_INIT(objects),
6181 .work = IRQ_WORK_INIT(free_deferred_objects),
6182 };
6183
6184 /*
6185 * In PREEMPT_RT irq_work runs in per-cpu kthread, so it's safe
6186 * to take sleeping spin_locks from __slab_free().
6187 * In !PREEMPT_RT irq_work will run after local_unlock_irqrestore().
6188 */
6189 static void free_deferred_objects(struct irq_work *work)
6190 {
6191 struct defer_free *df = container_of(work, struct defer_free, work);
6192 struct llist_head *objs = &df->objects;
6193 struct llist_node *llnode, *pos, *t;
6194
6195 if (llist_empty(objs))
6196 return;
6197
6198 llnode = llist_del_all(objs);
6199 llist_for_each_safe(pos, t, llnode) {
6200 struct kmem_cache *s;
6201 struct slab *slab;
6202 void *x = pos;
6203
6204 slab = virt_to_slab(x);
6205 s = slab->slab_cache;
6206
6207 /* Point 'x' back to the beginning of allocated object */
6208 x -= s->offset;
6209
6210 /*
6211 * We used freepointer in 'x' to link 'x' into df->objects.
6212 * Clear it to NULL to avoid false positive detection
6213 * of "Freepointer corruption".
6214 */
6215 set_freepointer(s, x, NULL);
6216
6217 __slab_free(s, slab, x, x, 1, _THIS_IP_);
6218 stat(s, FREE_SLOWPATH);
6219 }
6220 }
6221
6222 static void defer_free(struct kmem_cache *s, void *head)
6223 {
6224 struct defer_free *df;
6225
6226 guard(preempt)();
6227
6228 head = kasan_reset_tag(head);
6229
6230 df = this_cpu_ptr(&defer_free_objects);
6231 if (llist_add(head + s->offset, &df->objects))
6232 irq_work_queue(&df->work);
6233 }
6234
6235 void defer_free_barrier(void)
6236 {
6237 int cpu;
6238
6239 for_each_possible_cpu(cpu)
6240 irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
6241 }
6242
6243 static __fastpath_inline
6244 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
6245 unsigned long addr)
6246 {
6247 memcg_slab_free_hook(s, slab, &object, 1);
6248 alloc_tagging_slab_free_hook(s, slab, &object, 1);
6249
6250 if (unlikely(!slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6251 return;
6252
6253 if (likely(can_free_to_pcs(slab)) && likely(free_to_pcs(s, object, true)))
6254 return;
6255
6256 __slab_free(s, slab, object, object, 1, addr);
6257 stat(s, FREE_SLOWPATH);
6258 }
6259
6260 #ifdef CONFIG_MEMCG
6261 /* Do not inline the rare memcg charging failed path into the allocation path */
6262 static noinline
6263 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
6264 {
6265 struct slab *slab = virt_to_slab(object);
6266
6267 alloc_tagging_slab_free_hook(s, slab, &object, 1);
6268
6269 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6270 __slab_free(s, slab, object, object, 1, _RET_IP_);
6271 }
6272 #endif
6273
6274 static __fastpath_inline
6275 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
6276 void *tail, void **p, int cnt, unsigned long addr)
6277 {
6278 memcg_slab_free_hook(s, slab, p, cnt);
6279 alloc_tagging_slab_free_hook(s, slab, p, cnt);
6280 /*
6281 * With KASAN enabled slab_free_freelist_hook modifies the freelist
6282 * to remove objects, whose reuse must be delayed.
6283 */
6284 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) {
6285 __slab_free(s, slab, head, tail, cnt, addr);
6286 stat_add(s, FREE_SLOWPATH, cnt);
6287 }
6288 }
6289
6290 #ifdef CONFIG_SLUB_RCU_DEBUG
6291 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
6292 {
6293 struct rcu_delayed_free *delayed_free =
6294 container_of(rcu_head, struct rcu_delayed_free, head);
6295 void *object = delayed_free->object;
6296 struct slab *slab = virt_to_slab(object);
6297 struct kmem_cache *s;
6298
6299 kfree(delayed_free);
6300
6301 if (WARN_ON(is_kfence_address(object)))
6302 return;
6303
6304 /* find the object and the cache again */
6305 if (WARN_ON(!slab))
6306 return;
6307 s = slab->slab_cache;
6308 if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
6309 return;
6310
6311 /* resume freeing */
6312 if (slab_free_hook(s, object, slab_want_init_on_free(s), true)) {
6313 __slab_free(s, slab, object, object, 1, _THIS_IP_);
6314 stat(s, FREE_SLOWPATH);
6315 }
6316 }
6317 #endif /* CONFIG_SLUB_RCU_DEBUG */
6318
6319 #ifdef CONFIG_KASAN_GENERIC
6320 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
6321 {
6322 __slab_free(cache, virt_to_slab(x), x, x, 1, addr);
6323 stat(cache, FREE_SLOWPATH);
6324 }
6325 #endif
6326
6327 static noinline void warn_free_bad_obj(struct kmem_cache *s, void *obj)
6328 {
6329 struct kmem_cache *cachep;
6330 struct slab *slab;
6331
6332 slab = virt_to_slab(obj);
6333 if (WARN_ONCE(!slab,
6334 "kmem_cache_free(%s, %p): object is not in a slab page\n",
6335 s->name, obj))
6336 return;
6337
6338 cachep = slab->slab_cache;
6339
6340 if (WARN_ONCE(cachep != s,
6341 "kmem_cache_free(%s, %p): object belongs to different cache %s\n",
6342 s->name, obj, cachep ? cachep->name : "(NULL)")) {
6343 if (cachep)
6344 print_tracking(cachep, obj);
6345 return;
6346 }
6347 }
6348
6349 /**
6350 * kmem_cache_free - Deallocate an object
6351 * @s: The cache the allocation was from.
6352 * @x: The previously allocated object.
6353 *
6354 * Free an object which was previously allocated from this
6355 * cache.
6356 */
6357 void kmem_cache_free(struct kmem_cache *s, void *x)
6358 {
6359 struct slab *slab;
6360
6361 slab = virt_to_slab(x);
6362
6363 if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) ||
6364 kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
6365
6366 /*
6367 * Intentionally leak the object in these cases, because it
6368 * would be too dangerous to continue.
6369 */
6370 if (unlikely(!slab || (slab->slab_cache != s))) {
6371 warn_free_bad_obj(s, x);
6372 return;
6373 }
6374 }
6375
6376 trace_kmem_cache_free(_RET_IP_, x, s);
6377 slab_free(s, slab, x, _RET_IP_);
6378 }
6379 EXPORT_SYMBOL(kmem_cache_free);
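
/*
 * Illustrative pairing sketch (my_cache is assumed to be a cache created
 * elsewhere with kmem_cache_create(), struct my_obj is hypothetical):
 *
 *	struct my_obj *obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *
 *	if (obj) {
 *		// ... use obj ...
 *		kmem_cache_free(my_cache, obj);	// must return to the same cache
 *	}
 */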
6380
6381 static inline size_t slab_ksize(struct slab *slab)
6382 {
6383 struct kmem_cache *s = slab->slab_cache;
6384
6385 #ifdef CONFIG_SLUB_DEBUG
6386 /*
6387 * Debugging requires use of the padding between object
6388 * and whatever may come after it.
6389 */
6390 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
6391 return s->object_size;
6392 #endif
6393 if (s->flags & SLAB_KASAN)
6394 return s->object_size;
6395 /*
6396 * If we have the need to store the freelist pointer
6397 * or any other metadata back there then we can
6398 * only use the space before that information.
6399 */
6400 if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
6401 return s->inuse;
6402 else if (obj_exts_in_object(s, slab))
6403 return s->inuse;
6404 /*
6405 * Else we can use all the padding etc for the allocation
6406 */
6407 return s->size;
6408 }
6409
6410 static size_t __ksize(const void *object)
6411 {
6412 struct page *page;
6413 struct slab *slab;
6414
6415 if (unlikely(object == ZERO_SIZE_PTR))
6416 return 0;
6417
6418 page = virt_to_page(object);
6419
6420 if (unlikely(PageLargeKmalloc(page)))
6421 return large_kmalloc_size(page);
6422
6423 slab = page_slab(page);
6424 /* Delete this after we're sure there are no users */
6425 if (WARN_ON(!slab))
6426 return page_size(page);
6427
6428 #ifdef CONFIG_SLUB_DEBUG
6429 skip_orig_size_check(slab->slab_cache, object);
6430 #endif
6431
6432 return slab_ksize(slab);
6433 }
6434
6435 /**
6436 * ksize -- Report full size of underlying allocation
6437 * @objp: pointer to the object
6438 *
6439 * This should only be used internally to query the true size of allocations.
6440 * It is not meant to be a way to discover the usable size of an allocation
6441 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
6442 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
6443 * and/or FORTIFY_SOURCE.
6444 *
6445 * Return: size of the actual memory used by @objp in bytes
6446 */
6447 size_t ksize(const void *objp)
6448 {
6449 /*
6450 * We need to first check that the pointer to the object is valid.
6451 * The KASAN report printed from ksize() is more useful than one
6452 * printed later, when the behaviour could be undefined due to
6453 * a potential use-after-free or double-free.
6454 *
6455 * We use kasan_check_byte(), which is supported for the hardware
6456 * tag-based KASAN mode, unlike kasan_check_read/write().
6457 *
6458 * If the pointed to memory is invalid, we return 0 to avoid users of
6459 * ksize() writing to and potentially corrupting the memory region.
6460 *
6461 * We want to perform the check before __ksize(), to avoid potentially
6462 * crashing in __ksize() due to accessing invalid metadata.
6463 */
6464 if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
6465 return 0;
6466
6467 return kfence_ksize(objp) ?: __ksize(objp);
6468 }
6469 EXPORT_SYMBOL(ksize);
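
/*
 * Sketch of the intended division of labour described above: a caller that
 * wants to use the slack of a kmalloc bucket should size the request up front
 * with kmalloc_size_roundup() instead of probing it afterwards with ksize().
 * count and buf are hypothetical.
 *
 *	size_t want = kmalloc_size_roundup(count);	// e.g. 100 -> 128
 *	char *buf = kmalloc(want, GFP_KERNEL);
 *
 * All of buf[0..want-1] may then be used without tripping KASAN, UBSAN_BOUNDS
 * or FORTIFY_SOURCE.
 */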
6470
6471 static void free_large_kmalloc(struct page *page, void *object)
6472 {
6473 unsigned int order = compound_order(page);
6474
6475 if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
6476 dump_page(page, "Not a kmalloc allocation");
6477 return;
6478 }
6479
6480 if (WARN_ON_ONCE(order == 0))
6481 pr_warn_once("object pointer: 0x%p\n", object);
6482
6483 kmemleak_free(object);
6484 kasan_kfree_large(object);
6485 kmsan_kfree_large(object);
6486
6487 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
6488 -(PAGE_SIZE << order));
6489 __ClearPageLargeKmalloc(page);
6490 free_frozen_pages(page, order);
6491 }
6492
6493 /*
6494 * Given an rcu_head embedded within an object obtained from kvmalloc at an
6495 * offset < 4k, free the object in question.
6496 */
6497 void kvfree_rcu_cb(struct rcu_head *head)
6498 {
6499 void *obj = head;
6500 struct page *page;
6501 struct slab *slab;
6502 struct kmem_cache *s;
6503 void *slab_addr;
6504
6505 if (is_vmalloc_addr(obj)) {
6506 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6507 vfree(obj);
6508 return;
6509 }
6510
6511 page = virt_to_page(obj);
6512 slab = page_slab(page);
6513 if (!slab) {
6514 /*
6515 * The rcu_head offset can only be less than the page size, so there is
6516 * no need to consider the allocation order.
6517 */
6518 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6519 free_large_kmalloc(page, obj);
6520 return;
6521 }
6522
6523 s = slab->slab_cache;
6524 slab_addr = slab_address(slab);
6525
6526 if (is_kfence_address(obj)) {
6527 obj = kfence_object_start(obj);
6528 } else {
6529 unsigned int idx = __obj_to_index(s, slab_addr, obj);
6530
6531 obj = slab_addr + s->size * idx;
6532 obj = fixup_red_left(s, obj);
6533 }
6534
6535 slab_free(s, slab, obj, _RET_IP_);
6536 }
6537
6538 /**
6539 * kfree - free previously allocated memory
6540 * @object: pointer returned by kmalloc(), kmalloc_nolock(), or kmem_cache_alloc()
6541 *
6542 * If @object is NULL, no operation is performed.
6543 */
6544 void kfree(const void *object)
6545 {
6546 struct page *page;
6547 struct slab *slab;
6548 struct kmem_cache *s;
6549 void *x = (void *)object;
6550
6551 trace_kfree(_RET_IP_, object);
6552
6553 if (unlikely(ZERO_OR_NULL_PTR(object)))
6554 return;
6555
6556 page = virt_to_page(object);
6557 slab = page_slab(page);
6558 if (!slab) {
6559 /* kmalloc_nolock() doesn't support large kmalloc */
6560 free_large_kmalloc(page, (void *)object);
6561 return;
6562 }
6563
6564 s = slab->slab_cache;
6565 slab_free(s, slab, x, _RET_IP_);
6566 }
6567 EXPORT_SYMBOL(kfree);
6568
6569 /*
6570 * Can be called while holding raw_spinlock_t or from IRQ and NMI,
6571 * but ONLY for objects allocated by kmalloc_nolock().
6572 * Debug checks (like kmemleak and kfence) were skipped on allocation,
6573 * hence
6574 * obj = kmalloc(); kfree_nolock(obj);
6575 * will miss kmemleak/kfence bookkeeping and will cause false positives.
6576 * large_kmalloc is not supported either.
6577 */
6578 void kfree_nolock(const void *object)
6579 {
6580 struct slab *slab;
6581 struct kmem_cache *s;
6582 void *x = (void *)object;
6583
6584 if (unlikely(ZERO_OR_NULL_PTR(object)))
6585 return;
6586
6587 slab = virt_to_slab(object);
6588 if (unlikely(!slab)) {
6589 WARN_ONCE(1, "large_kmalloc is not supported by kfree_nolock()");
6590 return;
6591 }
6592
6593 s = slab->slab_cache;
6594
6595 memcg_slab_free_hook(s, slab, &x, 1);
6596 alloc_tagging_slab_free_hook(s, slab, &x, 1);
6597 /*
6598 * Unlike slab_free() do NOT call the following:
6599 * kmemleak_free_recursive(x, s->flags);
6600 * debug_check_no_locks_freed(x, s->object_size);
6601 * debug_check_no_obj_freed(x, s->object_size);
6602 * __kcsan_check_access(x, s->object_size, ..);
6603 * kfence_free(x);
6604 * since they take spinlocks or are not safe from any context.
6605 */
6606 kmsan_slab_free(s, x);
6607 /*
6608 * If KASAN finds a kernel bug it will do kasan_report_invalid_free()
6609 * which will call raw_spin_lock_irqsave(), which is technically
6610 * unsafe from NMI, but take the chance and report the kernel bug.
6611 * The sequence of
6612 * kasan_report_invalid_free() -> raw_spin_lock_irqsave() -> NMI
6613 * -> kfree_nolock() -> kasan_report_invalid_free() on the same CPU
6614 * is double buggy and deserves to deadlock.
6615 */
6616 if (kasan_slab_pre_free(s, x))
6617 return;
6618 /*
6619 * memcg, kasan_slab_pre_free are done for 'x'.
6620 * The only thing left is kasan_poison without quarantine,
6621 * since kasan quarantine takes locks and not supported from NMI.
6622 */
6623 kasan_slab_free(s, x, false, false, /* skip quarantine */true);
6624
6625 if (likely(can_free_to_pcs(slab)) && likely(free_to_pcs(s, x, false)))
6626 return;
6627
6628 /*
6629 * __slab_free() can locklessly cmpxchg16 into a slab, but then it might
6630 * need to take spin_lock for further processing.
6631 * Avoid the complexity and simply add to a deferred list.
6632 */
6633 defer_free(s, x);
6634 }
6635 EXPORT_SYMBOL_GPL(kfree_nolock);
6636
6637 static __always_inline __realloc_size(2) void *
6638 __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags, int nid)
6639 {
6640 void *ret;
6641 size_t ks = 0;
6642 int orig_size = 0;
6643 struct kmem_cache *s = NULL;
6644
6645 if (unlikely(ZERO_OR_NULL_PTR(p)))
6646 goto alloc_new;
6647
6648 /* Check for double-free. */
6649 if (!kasan_check_byte(p))
6650 return NULL;
6651
6652 if (is_kfence_address(p)) {
6653 ks = orig_size = kfence_ksize(p);
6654 } else {
6655 struct page *page = virt_to_page(p);
6656 struct slab *slab = page_slab(page);
6657
6658 if (!slab) {
6659 /* Big kmalloc object */
6660 ks = page_size(page);
6661 WARN_ON(ks <= KMALLOC_MAX_CACHE_SIZE);
6662 WARN_ON(p != page_address(page));
6663 } else {
6664 s = slab->slab_cache;
6665 orig_size = get_orig_size(s, (void *)p);
6666 ks = s->object_size;
6667 }
6668 }
6669
6670 /*
6671 * If reallocation is not necessary (e.g. the new size is less
6672 * than the current allocated size), the current allocation will be
6673 * preserved unless __GFP_THISNODE is set. In the latter case a new
6674 * allocation on the requested node will be attempted.
6675 */
6676 if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
6677 nid != page_to_nid(virt_to_page(p)))
6678 goto alloc_new;
6679
6680 /* If the old object doesn't fit, allocate a bigger one */
6681 if (new_size > ks)
6682 goto alloc_new;
6683
6684 /* If the old object doesn't satisfy the new alignment, allocate a new one */
6685 if (!IS_ALIGNED((unsigned long)p, align))
6686 goto alloc_new;
6687
6688 /* Zero out spare memory. */
6689 if (want_init_on_alloc(flags)) {
6690 kasan_disable_current();
6691 if (orig_size && orig_size < new_size)
6692 memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size);
6693 else
6694 memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
6695 kasan_enable_current();
6696 }
6697
6698 /* Setup kmalloc redzone when needed */
6699 if (s && slub_debug_orig_size(s)) {
6700 set_orig_size(s, (void *)p, new_size);
6701 if (s->flags & SLAB_RED_ZONE && new_size < ks)
6702 memset_no_sanitize_memory(kasan_reset_tag(p) + new_size,
6703 SLUB_RED_ACTIVE, ks - new_size);
6704 }
6705
6706 p = kasan_krealloc(p, new_size, flags);
6707 return (void *)p;
6708
6709 alloc_new:
6710 ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
6711 if (ret && p) {
6712 /* Disable KASAN checks as the object's redzone is accessed. */
6713 kasan_disable_current();
6714 memcpy(ret, kasan_reset_tag(p), min(new_size, (size_t)(orig_size ?: ks)));
6715 kasan_enable_current();
6716 }
6717
6718 return ret;
6719 }
6720
6721 /**
6722 * krealloc_node_align - reallocate memory. The contents will remain unchanged.
6723 * @p: object to reallocate memory for.
6724 * @new_size: how many bytes of memory are required.
6725 * @align: desired alignment.
6726 * @flags: the type of memory to allocate.
6727 * @nid: NUMA node or NUMA_NO_NODE
6728 *
6729 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
6730 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
6731 *
6732 * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6733 * Documentation/core-api/memory-allocation.rst for more details.
6734 *
6735 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
6736 * initial memory allocation, every subsequent call to this API for the same
6737 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
6738 * __GFP_ZERO is not fully honored by this API.
6739 *
6740 * When slub_debug_orig_size() is off, krealloc() only knows about the bucket
6741 * size of an allocation (but not the exact size it was allocated with) and
6742 * hence implements the following semantics for shrinking and growing buffers
6743 * with __GFP_ZERO::
6744 *
6745 *        new            bucket
6746 * 0      size            size
6747 * |--------|----------------|
6748 * |  keep  |      zero      |
6749 *
6750 * Otherwise, the original allocation size 'orig_size' could be used to
6751 * precisely clear the requested size, and the new size will also be stored
6752 * as the new 'orig_size'.
6753 *
6754 * In any case, the contents of the object pointed to are preserved up to the
6755 * lesser of the new and old sizes.
6756 *
6757 * Return: pointer to the allocated memory or %NULL in case of error
6758 */
6759 void *krealloc_node_align_noprof(const void *p, size_t new_size, unsigned long align,
6760 gfp_t flags, int nid)
6761 {
6762 void *ret;
6763
6764 if (unlikely(!new_size)) {
6765 kfree(p);
6766 return ZERO_SIZE_PTR;
6767 }
6768
6769 ret = __do_krealloc(p, new_size, align, flags, nid);
6770 if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
6771 kfree(p);
6772
6773 return ret;
6774 }
6775 EXPORT_SYMBOL(krealloc_node_align_noprof);
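
/*
 * Worked example of the __GFP_ZERO semantics documented above, assuming the
 * default kmalloc buckets (buf is hypothetical):
 *
 *	buf = kmalloc(100, GFP_KERNEL | __GFP_ZERO);	// lands in the 128-byte bucket
 *	buf = krealloc(buf, 80, GFP_KERNEL | __GFP_ZERO);
 *
 * Without orig_size tracking the object stays in place, bytes 0..79 are kept
 * and bytes 80..127 are zeroed again. With slub_debug_orig_size() the new
 * requested size is recorded as well, so a later grow back to 100 bytes only
 * needs to zero bytes 80..99.
 */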
6776
6777 static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
6778 {
6779 /*
6780 * We want to attempt a large physically contiguous block first because
6781 * it is less likely to fragment multiple larger blocks and therefore
6782 * contributes less to long-term fragmentation than the vmalloc fallback.
6783 * However, make sure that larger requests are not too disruptive - i.e.
6784 * do not direct reclaim unless physically contiguous memory is preferred
6785 * (__GFP_RETRY_MAYFAIL mode). We still kick kswapd/kcompactd into
6786 * starting work in the background.
6787 */
6788 if (size > PAGE_SIZE) {
6789 flags |= __GFP_NOWARN;
6790
6791 if (!(flags & __GFP_RETRY_MAYFAIL))
6792 flags &= ~__GFP_DIRECT_RECLAIM;
6793
6794 /* nofail semantic is implemented by the vmalloc fallback */
6795 flags &= ~__GFP_NOFAIL;
6796 }
6797
6798 return flags;
6799 }
6800
6801 /**
6802 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
6803 * failure, fall back to non-contiguous (vmalloc) allocation.
6804 * @size: size of the request.
6805 * @b: which set of kmalloc buckets to allocate from.
6806 * @align: desired alignment.
6807 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
6808 * @node: numa node to allocate from
6809 *
6810 * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6811 * Documentation/core-api/memory-allocation.rst for more details.
6812 *
6813 * Uses kmalloc to get the memory but if the allocation fails then falls back
6814 * to the vmalloc allocator. Use kvfree for freeing the memory.
6815 *
6816 * GFP_NOWAIT and GFP_ATOMIC are supported, the __GFP_NORETRY modifier is not.
6817 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
6818 * preferable to the vmalloc fallback, due to visible performance drawbacks.
6819 *
6820 * Return: pointer to the allocated memory or %NULL in case of failure
6821 */
6822 void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
6823 gfp_t flags, int node)
6824 {
6825 bool allow_block;
6826 void *ret;
6827
6828 /*
6829 * It doesn't really make sense to fall back to vmalloc for sub-page
6830 * requests.
6831 */
6832 ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),
6833 kmalloc_gfp_adjust(flags, size),
6834 node, _RET_IP_);
6835 if (ret || size <= PAGE_SIZE)
6836 return ret;
6837
6838 /* Don't even allow crazy sizes */
6839 if (unlikely(size > INT_MAX)) {
6840 WARN_ON_ONCE(!(flags & __GFP_NOWARN));
6841 return NULL;
6842 }
6843
6844 /*
6845 * For non-blocking the VM_ALLOW_HUGE_VMAP is not used
6846 * because the huge-mapping path in vmalloc contains at
6847 * least one might_sleep() call.
6848 *
6849 * TODO: Revise huge-mapping path to support non-blocking
6850 * flags.
6851 */
6852 allow_block = gfpflags_allow_blocking(flags);
6853
6854 /*
6855 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
6856 * since the callers already cannot assume anything
6857 * about the resulting pointer, and cannot play
6858 * protection games.
6859 */
6860 return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
6861 flags, PAGE_KERNEL, allow_block ? VM_ALLOW_HUGE_VMAP:0,
6862 node, __builtin_return_address(0));
6863 }
6864 EXPORT_SYMBOL(__kvmalloc_node_noprof);
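
/*
 * Typical pairing sketch for the fallback behaviour described above (nr and
 * struct item are hypothetical):
 *
 *	struct item *table = kvmalloc_array(nr, sizeof(*table), GFP_KERNEL);
 *
 *	if (!table)
 *		return -ENOMEM;
 *	// table may be kmalloc- or vmalloc-backed; kvfree() handles both
 *	kvfree(table);
 */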
6865
6866 /**
6867 * kvfree() - Free memory.
6868 * @addr: Pointer to allocated memory.
6869 *
6870 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
6871 * It is slightly more efficient to use kfree() or vfree() if you are certain
6872 * that you know which one to use.
6873 *
6874 * Context: Either preemptible task context or not-NMI interrupt.
6875 */
6876 void kvfree(const void *addr)
6877 {
6878 if (is_vmalloc_addr(addr))
6879 vfree(addr);
6880 else
6881 kfree(addr);
6882 }
6883 EXPORT_SYMBOL(kvfree);
6884
6885 /**
6886 * kvfree_atomic() - Free memory.
6887 * @addr: Pointer to allocated memory.
6888 *
6889 * Same as kvfree(), but uses vfree_atomic() for vmalloc
6890 * backed memory. Must not be called from NMI context.
6891 */
6892 void kvfree_atomic(const void *addr)
6893 {
6894 if (is_vmalloc_addr(addr))
6895 vfree_atomic(addr);
6896 else
6897 kfree(addr);
6898 }
6899 EXPORT_SYMBOL(kvfree_atomic);
6900
6901 /**
6902 * kvfree_sensitive - Free a data object containing sensitive information.
6903 * @addr: address of the data object to be freed.
6904 * @len: length of the data object.
6905 *
6906 * Use the special memzero_explicit() function to clear the content of a
6907 * kvmalloc'ed object containing sensitive data to make sure that the
6908 * compiler won't optimize out the data clearing.
6909 */
6910 void kvfree_sensitive(const void *addr, size_t len)
6911 {
6912 if (likely(!ZERO_OR_NULL_PTR(addr))) {
6913 memzero_explicit((void *)addr, len);
6914 kvfree(addr);
6915 }
6916 }
6917 EXPORT_SYMBOL(kvfree_sensitive);
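
/*
 * Usage sketch (key and key_len are hypothetical): the buffer is wiped with
 * memzero_explicit() before being returned to the allocator, so the secret
 * does not linger in freed slab or vmalloc memory.
 *
 *	u8 *key = kvmalloc(key_len, GFP_KERNEL);
 *	// ... derive and use the key ...
 *	kvfree_sensitive(key, key_len);
 */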
6918
6919 /**
6920 * kvrealloc_node_align - reallocate memory; contents remain unchanged
6921 * @p: object to reallocate memory for
6922 * @size: the size to reallocate
6923 * @align: desired alignment
6924 * @flags: the flags for the page level allocator
6925 * @nid: NUMA node id
6926 *
6927 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
6928 * and @p is not a %NULL pointer, the object pointed to is freed.
6929 *
6930 * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6931 * Documentation/core-api/memory-allocation.rst for more details.
6932 *
6933 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
6934 * initial memory allocation, every subsequent call to this API for the same
6935 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
6936 * __GFP_ZERO is not fully honored by this API.
6937 *
6938 * In any case, the contents of the object pointed to are preserved up to the
6939 * lesser of the new and old sizes.
6940 *
6941 * This function must not be called concurrently with itself or kvfree() for the
6942 * same memory allocation.
6943 *
6944 * Return: pointer to the allocated memory or %NULL in case of error
6945 */
6946 void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
6947 gfp_t flags, int nid)
6948 {
6949 void *n;
6950
6951 if (is_vmalloc_addr(p))
6952 return vrealloc_node_align_noprof(p, size, align, flags, nid);
6953
6954 n = krealloc_node_align_noprof(p, size, align, kmalloc_gfp_adjust(flags, size), nid);
6955 if (!n) {
6956 /* We failed to krealloc(), fall back to kvmalloc(). */
6957 n = kvmalloc_node_align_noprof(size, align, flags, nid);
6958 if (!n)
6959 return NULL;
6960
6961 if (p) {
6962 /* We already know that `p` is not a vmalloc address. */
6963 kasan_disable_current();
6964 memcpy(n, kasan_reset_tag(p), min(size, ksize(p)));
6965 kasan_enable_current();
6966
6967 kfree(p);
6968 }
6969 }
6970
6971 return n;
6972 }
6973 EXPORT_SYMBOL(kvrealloc_node_align_noprof);
6974
6975 struct detached_freelist {
6976 struct slab *slab;
6977 void *tail;
6978 void *freelist;
6979 int cnt;
6980 struct kmem_cache *s;
6981 };
6982
6983 /*
6984 * This function progressively scans the array of objects to free (with
6985 * a limited look-ahead) and extracts objects belonging to the same
6986 * slab. It builds a detached freelist directly within the given
6987 * slab/objects. This can happen without any need for
6988 * synchronization, because the objects are owned by the running process.
6989 * The freelist is built up as a singly linked list in the objects.
6990 * The idea is that this detached freelist can then be bulk
6991 * transferred to the real freelist(s), requiring only a single
6992 * synchronization primitive. Look-ahead in the array is limited for
6993 * performance reasons.
6994 */
6995 static inline
6996 int build_detached_freelist(struct kmem_cache *s, size_t size,
6997 void **p, struct detached_freelist *df)
6998 {
6999 int lookahead = 3;
7000 void *object;
7001 struct page *page;
7002 struct slab *slab;
7003 size_t same;
7004
7005 object = p[--size];
7006 page = virt_to_page(object);
7007 slab = page_slab(page);
7008 if (!s) {
7009 /* Handle kmalloc'ed objects */
7010 if (!slab) {
7011 free_large_kmalloc(page, object);
7012 df->slab = NULL;
7013 return size;
7014 }
7015 /* Derive kmem_cache from object */
7016 df->slab = slab;
7017 df->s = slab->slab_cache;
7018 } else {
7019 df->slab = slab;
7020 df->s = s;
7021 }
7022
7023 /* Start new detached freelist */
7024 df->tail = object;
7025 df->freelist = object;
7026 df->cnt = 1;
7027
7028 if (is_kfence_address(object))
7029 return size;
7030
7031 set_freepointer(df->s, object, NULL);
7032
7033 same = size;
7034 while (size) {
7035 object = p[--size];
7036 /* df->slab is always set at this point */
7037 if (df->slab == virt_to_slab(object)) {
7038 /* Opportunistically build the freelist */
7039 set_freepointer(df->s, object, df->freelist);
7040 df->freelist = object;
7041 df->cnt++;
7042 same--;
7043 if (size != same)
7044 swap(p[size], p[same]);
7045 continue;
7046 }
7047
7048 /* Limit look ahead search */
7049 if (!--lookahead)
7050 break;
7051 }
7052
7053 return same;
7054 }
7055
7056 /*
7057 * Internal bulk free of objects that were not initialised by the post alloc
7058 * hooks and thus should not be processed by the free hooks
7059 */
7060 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
7061 {
7062 if (!size)
7063 return;
7064
7065 do {
7066 struct detached_freelist df;
7067
7068 size = build_detached_freelist(s, size, p, &df);
7069 if (!df.slab)
7070 continue;
7071
7072 if (kfence_free(df.freelist))
7073 continue;
7074
7075 __slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
7076 _RET_IP_);
7077 } while (likely(size));
7078 }
7079
7080 /* Note that interrupts must be enabled when calling this function. */
7081 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
7082 {
7083 if (!size)
7084 return;
7085
7086 /*
7087 * Freeing to sheaves is incompatible with the detached freelist, so
7088 * once we go that way, we have to do everything differently.
7089 */
7090 if (s && cache_has_sheaves(s)) {
7091 free_to_pcs_bulk(s, size, p);
7092 return;
7093 }
7094
7095 do {
7096 struct detached_freelist df;
7097
7098 size = build_detached_freelist(s, size, p, &df);
7099 if (!df.slab)
7100 continue;
7101
7102 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
7103 df.cnt, _RET_IP_);
7104 } while (likely(size));
7105 }
7106 EXPORT_SYMBOL(kmem_cache_free_bulk);
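
/*
 * Bulk API pairing sketch (my_cache and NR are hypothetical). Interrupts must
 * be enabled, and the array is typically filled by kmem_cache_alloc_bulk():
 *
 *	void *objs[NR];
 *	int allocated = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, NR, objs);
 *
 *	// ... use objs[0..allocated-1] ...
 *	kmem_cache_free_bulk(my_cache, allocated, objs);
 */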
7107
7108 static unsigned int
7109 __refill_objects_node(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7110 unsigned int max, struct kmem_cache_node *n,
7111 bool allow_spin)
7112 {
7113 struct partial_bulk_context pc;
7114 struct slab *slab, *slab2;
7115 unsigned int refilled = 0;
7116 unsigned long flags;
7117 void *object;
7118
7119 pc.flags = gfp;
7120 pc.min_objects = min;
7121 pc.max_objects = max;
7122
7123 if (!get_partial_node_bulk(s, n, &pc, allow_spin))
7124 return 0;
7125
7126 list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7127
7128 list_del(&slab->slab_list);
7129
7130 object = get_freelist_nofreeze(s, slab);
7131
7132 while (object && refilled < max) {
7133 p[refilled] = object;
7134 object = get_freepointer(s, object);
7135 maybe_wipe_obj_freeptr(s, p[refilled]);
7136
7137 refilled++;
7138 }
7139
7140 /*
7141 * The freelist had more objects than we can accommodate, so we need to
7142 * free them back. We can treat it like a detached freelist; we just
7143 * need to find the tail object.
7144 */
7145 if (unlikely(object)) {
7146 void *head = object;
7147 void *tail;
7148 int cnt = 0;
7149
7150 do {
7151 tail = object;
7152 cnt++;
7153 object = get_freepointer(s, object);
7154 } while (object);
7155 __slab_free(s, slab, head, tail, cnt, _RET_IP_);
7156 }
7157
7158 if (refilled >= max)
7159 break;
7160 }
7161
7162 if (unlikely(!list_empty(&pc.slabs))) {
7163 spin_lock_irqsave(&n->list_lock, flags);
7164
7165 list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7166
7167 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial))
7168 continue;
7169
7170 list_del(&slab->slab_list);
7171 add_partial(n, slab, ADD_TO_HEAD);
7172 }
7173
7174 spin_unlock_irqrestore(&n->list_lock, flags);
7175
7176 /* any slabs left are completely free and can be discarded */
7177 list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7178
7179 list_del(&slab->slab_list);
7180 discard_slab(s, slab);
7181 }
7182 }
7183
7184 return refilled;
7185 }
7186
7187 #ifdef CONFIG_NUMA
7188 static unsigned int
7189 __refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7190 unsigned int max)
7191 {
7192 struct zonelist *zonelist;
7193 struct zoneref *z;
7194 struct zone *zone;
7195 enum zone_type highest_zoneidx = gfp_zone(gfp);
7196 unsigned int cpuset_mems_cookie;
7197 unsigned int refilled = 0;
7198
7199 /* see get_from_any_partial() for the defrag ratio description */
7200 if (!s->remote_node_defrag_ratio ||
7201 get_cycles() % 1024 > s->remote_node_defrag_ratio)
7202 return 0;
7203
7204 do {
7205 cpuset_mems_cookie = read_mems_allowed_begin();
7206 zonelist = node_zonelist(mempolicy_slab_node(), gfp);
7207 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
7208 struct kmem_cache_node *n;
7209 unsigned int r;
7210
7211 n = get_node(s, zone_to_nid(zone));
7212
7213 if (!n || !cpuset_zone_allowed(zone, gfp) ||
7214 n->nr_partial <= s->min_partial)
7215 continue;
7216
7217 r = __refill_objects_node(s, p, gfp, min, max, n,
7218 /* allow_spin = */ false);
7219 refilled += r;
7220
7221 if (r >= min) {
7222 /*
7223 * Don't check read_mems_allowed_retry() here -
7224 * if mems_allowed was updated in parallel, that
7225 * was a harmless race between allocation and
7226 * the cpuset update
7227 */
7228 return refilled;
7229 }
7230 p += r;
7231 min -= r;
7232 max -= r;
7233 }
7234 } while (read_mems_allowed_retry(cpuset_mems_cookie));
7235
7236 return refilled;
7237 }
7238 #else
7239 static inline unsigned int
7240 __refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7241 unsigned int max)
7242 {
7243 return 0;
7244 }
7245 #endif
7246
7247 static unsigned int
7248 refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7249 unsigned int max)
7250 {
7251 int local_node = numa_mem_id();
7252 unsigned int refilled;
7253 struct slab *slab;
7254
7255 if (WARN_ON_ONCE(!gfpflags_allow_spinning(gfp)))
7256 return 0;
7257
7258 refilled = __refill_objects_node(s, p, gfp, min, max,
7259 get_node(s, local_node),
7260 /* allow_spin = */ true);
7261 if (refilled >= min)
7262 return refilled;
7263
7264 refilled += __refill_objects_any(s, p + refilled, gfp, min - refilled,
7265 max - refilled);
7266 if (refilled >= min)
7267 return refilled;
7268
7269 new_slab:
7270
7271 slab = new_slab(s, gfp, local_node);
7272 if (!slab)
7273 goto out;
7274
7275 stat(s, ALLOC_SLAB);
7276
7277 /*
7278 * TODO: possible optimization - if we know we will consume the whole
7279 * slab we might skip creating the freelist?
7280 */
7281 refilled += alloc_from_new_slab(s, slab, p + refilled, max - refilled,
7282 /* allow_spin = */ true);
7283
7284 if (refilled < min)
7285 goto new_slab;
7286
7287 out:
7288 return refilled;
7289 }
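
/*
* Illustrative sketch of the min/max contract (the caller, array and sizes
* below are hypothetical):
*
*    void *objs[8];
*    unsigned int got = refill_objects(s, objs, GFP_KERNEL, 4, 8);
*
* On return, objs[0..got-1] are valid objects. The function keeps
* allocating new slabs until at least "min" objects are gathered, so "got"
* only falls short of the minimum when slab allocation itself fails (or
* when the gfp flags do not allow spinning at all).
*/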
7290
7291 static inline
7292 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
7293 void **p)
7294 {
7295 int i;
7296
7297 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
7298 for (i = 0; i < size; i++) {
7299
7300 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_,
7301 s->object_size);
7302 if (unlikely(!p[i]))
7303 goto error;
7304
7305 maybe_wipe_obj_freeptr(s, p[i]);
7306 }
7307 } else {
7308 i = refill_objects(s, p, flags, size, size);
7309 if (i < size)
7310 goto error;
7311 stat_add(s, ALLOC_SLOWPATH, i);
7312 }
7313
7314 return i;
7315
7316 error:
7317 __kmem_cache_free_bulk(s, i, p);
7318 return 0;
7319
7320 }
7321
7322 /*
7323 * Note that interrupts must be enabled when calling this function and gfp
7324 * flags must allow spinning.
7325 */
7326 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
7327 void **p)
7328 {
7329 unsigned int i = 0;
7330 void *kfence_obj;
7331
7332 if (!size)
7333 return 0;
7334
7335 s = slab_pre_alloc_hook(s, flags);
7336 if (unlikely(!s))
7337 return 0;
7338
7339 /*
7340 * To make things simpler, assume at most one kfence-allocated
7341 * object per bulk allocation and choose its index randomly.
7342 */
7343 kfence_obj = kfence_alloc(s, s->object_size, flags);
7344
7345 if (unlikely(kfence_obj)) {
7346 if (unlikely(size == 1)) {
7347 p[0] = kfence_obj;
7348 goto out;
7349 }
7350 size--;
7351 }
7352
7353 i = alloc_from_pcs_bulk(s, flags, size, p);
7354
7355 if (i < size) {
7356 /*
7357 * If we ran out of memory, don't bother with freeing back to
7358 * the percpu sheaves, we have bigger problems.
7359 */
7360 if (unlikely(__kmem_cache_alloc_bulk(s, flags, size - i, p + i) == 0)) {
7361 if (i > 0)
7362 __kmem_cache_free_bulk(s, i, p);
7363 if (kfence_obj)
7364 __kfence_free(kfence_obj);
7365 return 0;
7366 }
7367 }
7368
7369 if (unlikely(kfence_obj)) {
7370 int idx = get_random_u32_below(size + 1);
7371
7372 if (idx != size)
7373 p[size] = p[idx];
7374 p[idx] = kfence_obj;
7375
7376 size++;
7377 }
7378
7379 out:
7380 /*
7381 * memcg and kmem_cache debug support and memory initialization.
7382 * Done outside of the IRQ disabled fastpath loop.
7383 */
7384 if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
7385 slab_want_init_on_alloc(flags, s), s->object_size))) {
7386 return 0;
7387 }
7388
7389 return size;
7390 }
7391 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
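
/*
* Typical caller-side use of the bulk API (illustrative sketch; the cache
* and array size are hypothetical):
*
*    void *objs[16];
*    int n;
*
*    n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
*    if (!n)
*        return -ENOMEM;
*
* A return of 0 means no objects were allocated; otherwise all "size"
* objects were provided. The same array can later be released in one call:
*
*    kmem_cache_free_bulk(my_cache, n, objs);
*/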
7392
7393 /*
7394 * Object placement in a slab is made very easy because we always start at
7395 * offset 0. If we tune the size of the object to the alignment then we can
7396 * get the required alignment by putting one properly sized object after
7397 * another.
7398 *
7399 * Notice that the allocation order determines the sizes of the per cpu
7400 * caches. Each processor always has one slab available for allocations.
7401 * Increasing the allocation order reduces the number of times that slabs
7402 * must be moved on and off the partial lists and is therefore a factor in
7403 * locking overhead.
7404 */
7405
7406 /*
7407 * Minimum / Maximum order of slab pages. This influences locking overhead
7408 * and slab fragmentation. A higher order reduces the number of partial slabs
7409 * and increases the number of allocations possible without having to
7410 * take the list_lock.
7411 */
7412 static unsigned int slub_min_order;
7413 static unsigned int slub_max_order =
7414 IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
7415 static unsigned int slub_min_objects;
7416
7417 /*
7418 * Calculate the order of allocation given a slab object size.
7419 *
7420 * The order of allocation has significant impact on performance and other
7421 * system components. Generally order 0 allocations should be preferred since
7422 * order 0 does not cause fragmentation in the page allocator. Larger objects
7423 * can be problematic to put into order 0 slabs because there may be too much
7424 * unused space left. We go to a higher order if more than 1/16th of the slab
7425 * would be wasted.
7426 *
7427 * In order to reach satisfactory performance we must ensure that a minimum
7428 * number of objects is in one slab. Otherwise we may generate too much
7429 * activity on the partial lists which requires taking the list_lock. This is
7430 * less a concern for large slabs though which are rarely used.
7431 *
7432 * slab_max_order specifies the order where we begin to stop considering the
7433 * number of objects in a slab as critical. If we reach slab_max_order then
7434 * we try to keep the page order as low as possible. So we accept more waste
7435 * of space in favor of a small page order.
7436 *
7437 * Higher order allocations also allow the placement of more objects in a
7438 * slab and thereby reduce object handling overhead. If the user has
7439 * requested a higher minimum order then we start with that one instead of
7440 * the smallest order which will fit the object.
7441 */
7442 static inline unsigned int calc_slab_order(unsigned int size,
7443 unsigned int min_order, unsigned int max_order,
7444 unsigned int fract_leftover)
7445 {
7446 unsigned int order;
7447
7448 for (order = min_order; order <= max_order; order++) {
7449
7450 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
7451 unsigned int rem;
7452
7453 rem = slab_size % size;
7454
7455 if (rem <= slab_size / fract_leftover)
7456 break;
7457 }
7458
7459 return order;
7460 }
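
/*
* Worked example (hypothetical numbers, assuming min_order == 0 and
* fract_leftover == 16) for size == 700:
*
*    order 0: 4096 % 700 == 596, which exceeds 4096 / 16 == 256 -> rejected
*    order 1: 8192 % 700 == 492, which is <= 8192 / 16 == 512   -> accepted
*
* so calc_slab_order() returns 1. If no order up to max_order satisfies the
* waste constraint, the loop runs to completion and returns max_order + 1,
* which the caller detects by comparing against slub_max_order.
*/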
7461
7462 static inline int calculate_order(unsigned int size)
7463 {
7464 unsigned int order;
7465 unsigned int min_objects;
7466 unsigned int max_objects;
7467 unsigned int min_order;
7468
7469 min_objects = slub_min_objects;
7470 if (!min_objects) {
7471 /*
7472 * Some architectures will only update present cpus when
7473 * onlining them, so don't trust the number if it's just 1. But
7474 * we also don't want to use nr_cpu_ids always, as on some other
7475 * architectures, there can be many possible cpus, but never
7476 * onlined. Here we compromise between trying to avoid too high
7477 * order on systems that appear larger than they are, and too
7478 * low order on systems that appear smaller than they are.
7479 */
7480 unsigned int nr_cpus = num_present_cpus();
7481 if (nr_cpus <= 1)
7482 nr_cpus = nr_cpu_ids;
7483 min_objects = 4 * (fls(nr_cpus) + 1);
7484 }
7485 /* min_objects can't be 0 because get_order(0) is undefined */
7486 max_objects = max(order_objects(slub_max_order, size), 1U);
7487 min_objects = min(min_objects, max_objects);
7488
7489 min_order = max_t(unsigned int, slub_min_order,
7490 get_order(min_objects * size));
7491 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
7492 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
7493
7494 /*
7495 * Attempt to find best configuration for a slab. This works by first
7496 * attempting to generate a layout with the best possible configuration
7497 * and backing off gradually.
7498 *
7499 * We start with accepting at most 1/16 waste and try to find the
7500 * smallest order from min_objects-derived/slab_min_order up to
7501 * slab_max_order that will satisfy the constraint. Note that increasing
7502 * the order can only result in same or less fractional waste, not more.
7503 *
7504 * If that fails, we increase the acceptable fraction of waste and try
7505 * again. The last iteration with fraction of 1/2 would effectively
7506 * accept any waste and give us the order determined by min_objects, as
7507 * long as at least a single object fits within slab_max_order.
7508 */
7509 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
7510 order = calc_slab_order(size, min_order, slub_max_order,
7511 fraction);
7512 if (order <= slub_max_order)
7513 return order;
7514 }
7515
7516 /*
7517 * Doh this slab cannot be placed using slab_max_order.
7518 */
7519 order = get_order(size);
7520 if (order <= MAX_PAGE_ORDER)
7521 return order;
7522 return -ENOSYS;
7523 }
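
/*
* Worked example of the min_objects heuristic above (hypothetical system):
* with 16 present CPUs, fls(16) == 5, so min_objects == 4 * (5 + 1) == 24.
* For a 256-byte object and slub_min_order == 0 that gives
* min_order == get_order(24 * 256) == get_order(6144) == 1, and the
* fraction loop then searches orders 1..slub_max_order for acceptable waste.
*/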
7524
7525 static void
7526 init_kmem_cache_node(struct kmem_cache_node *n)
7527 {
7528 n->nr_partial = 0;
7529 spin_lock_init(&n->list_lock);
7530 INIT_LIST_HEAD(&n->partial);
7531 #ifdef CONFIG_SLUB_DEBUG
7532 atomic_long_set(&n->nr_slabs, 0);
7533 atomic_long_set(&n->total_objects, 0);
7534 INIT_LIST_HEAD(&n->full);
7535 #endif
7536 }
7537
7538 #ifdef CONFIG_SLUB_STATS
7539 static inline int alloc_kmem_cache_stats(struct kmem_cache *s)
7540 {
7541 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
7542 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
7543 sizeof(struct kmem_cache_stats));
7544
7545 s->cpu_stats = alloc_percpu(struct kmem_cache_stats);
7546
7547 if (!s->cpu_stats)
7548 return 0;
7549
7550 return 1;
7551 }
7552 #endif
7553
7554 static int init_percpu_sheaves(struct kmem_cache *s)
7555 {
7556 static struct slab_sheaf bootstrap_sheaf = {};
7557 int cpu;
7558
7559 for_each_possible_cpu(cpu) {
7560 struct slub_percpu_sheaves *pcs;
7561
7562 pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
7563
7564 local_trylock_init(&pcs->lock);
7565
7566 /*
7567 * Bootstrap sheaf has zero size so fast-path allocation fails.
7568 * It also has size == s->sheaf_capacity, so fast-path free
7569 * fails. In the slow paths we recognize the situation by
7570 * checking s->sheaf_capacity. This allows fast paths to assume
7571 * s->cpu_sheaves and pcs->main always exist and are valid.
7572 * It's also safe to share the single static bootstrap_sheaf
7573 * with zero-sized objects array as it's never modified.
7574 *
7575 * The bootstrap_sheaf also has a NULL kmem_cache pointer, so we
7576 * recognize it and do not attempt to free it when destroying the
7577 * cache.
7578 *
7579 * We keep bootstrap_sheaf for kmem_cache and kmem_cache_node,
7580 * caches with debug enabled, and all caches with SLUB_TINY.
7581 * For kmalloc caches it's used temporarily during the initial
7582 * bootstrap.
7583 */
7584 if (!s->sheaf_capacity)
7585 pcs->main = &bootstrap_sheaf;
7586 else
7587 pcs->main = alloc_empty_sheaf(s, GFP_KERNEL);
7588
7589 if (!pcs->main)
7590 return -ENOMEM;
7591 }
7592
7593 return 0;
7594 }
7595
7596 static struct kmem_cache *kmem_cache_node;
7597
7598 /*
7599 * No kmalloc_node yet so do it by hand. We know that this is the first
7600 * slab on the node for this slabcache. There are no concurrent accesses
7601 * possible.
7602 *
7603 * Note that this function only works on the kmem_cache_node cache
7604 * when allocating kmem_cache_node structures. This is used for bootstrapping
7605 * memory on a fresh node that has no slab structures yet.
7606 */
7607 static void early_kmem_cache_node_alloc(int node)
7608 {
7609 struct slab *slab;
7610 struct kmem_cache_node *n;
7611
7612 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
7613
7614 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
7615
7616 BUG_ON(!slab);
7617 if (slab_nid(slab) != node) {
7618 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
7619 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
7620 }
7621
7622 n = slab->freelist;
7623 BUG_ON(!n);
7624 #ifdef CONFIG_SLUB_DEBUG
7625 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
7626 #endif
7627 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
7628 slab->freelist = get_freepointer(kmem_cache_node, n);
7629 slab->inuse = 1;
7630 kmem_cache_node->per_node[node].node = n;
7631 init_kmem_cache_node(n);
7632 inc_slabs_node(kmem_cache_node, node, slab->objects);
7633
7634 /*
7635 * No locks need to be taken here as it has just been
7636 * initialized and there is no concurrent access.
7637 */
7638 __add_partial(n, slab, ADD_TO_HEAD);
7639 }
7640
7641 static void free_kmem_cache_nodes(struct kmem_cache *s)
7642 {
7643 int node;
7644 struct kmem_cache_node *n;
7645
7646 for_each_node(node) {
7647 struct node_barn *barn = get_barn_node(s, node);
7648
7649 if (!barn)
7650 continue;
7651
7652 WARN_ON(barn->nr_full);
7653 WARN_ON(barn->nr_empty);
7654 kfree(barn);
7655 s->per_node[node].barn = NULL;
7656 }
7657
7658 for_each_kmem_cache_node(s, node, n) {
7659 s->per_node[node].node = NULL;
7660 kmem_cache_free(kmem_cache_node, n);
7661 }
7662 }
7663
7664 void __kmem_cache_release(struct kmem_cache *s)
7665 {
7666 cache_random_seq_destroy(s);
7667 pcs_destroy(s);
7668 #ifdef CONFIG_SLUB_STATS
7669 free_percpu(s->cpu_stats);
7670 #endif
7671 free_kmem_cache_nodes(s);
7672 }
7673
7674 static int init_kmem_cache_nodes(struct kmem_cache *s)
7675 {
7676 int node;
7677
7678 for_each_node_mask(node, slab_nodes) {
7679 struct kmem_cache_node *n;
7680
7681 if (slab_state == DOWN) {
7682 early_kmem_cache_node_alloc(node);
7683 continue;
7684 }
7685
7686 n = kmem_cache_alloc_node(kmem_cache_node,
7687 GFP_KERNEL, node);
7688 if (!n)
7689 return 0;
7690
7691 init_kmem_cache_node(n);
7692 s->per_node[node].node = n;
7693 }
7694
7695 if (slab_state == DOWN || !cache_has_sheaves(s))
7696 return 1;
7697
7698 for_each_node_mask(node, slab_barn_nodes) {
7699 struct node_barn *barn;
7700
7701 barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
7702
7703 if (!barn)
7704 return 0;
7705
7706 barn_init(barn);
7707 s->per_node[node].barn = barn;
7708 }
7709
7710 return 1;
7711 }
7712
7713 static unsigned int calculate_sheaf_capacity(struct kmem_cache *s,
7714 struct kmem_cache_args *args)
7715
7716 {
7717 unsigned int capacity;
7718 size_t size;
7719
7720
7721 if (IS_ENABLED(CONFIG_SLUB_TINY) || s->flags & SLAB_DEBUG_FLAGS)
7722 return 0;
7723
7724 /*
7725 * Bootstrap caches can't have sheaves for now (SLAB_NO_OBJ_EXT).
7726 * SLAB_NOLEAKTRACE caches (e.g., kmemleak's object_cache) must not
7727 * have sheaves to avoid recursion when sheaf allocation triggers
7728 * kmemleak tracking.
7729 */
7730 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
7731 return 0;
7732
7733 /*
7734 * For now we use a formula roughly similar to the one used for percpu
7735 * partial slabs (divided by two, as there are two percpu sheaves), which
7736 * should result in similar lock contention (barn or list_lock).
7737 */
7738 if (s->size >= PAGE_SIZE)
7739 capacity = 4;
7740 else if (s->size >= 1024)
7741 capacity = 12;
7742 else if (s->size >= 256)
7743 capacity = 26;
7744 else
7745 capacity = 60;
7746
7747 /* Increase the capacity so the sheaf exactly fills a kmalloc size bucket */
7748 size = struct_size_t(struct slab_sheaf, objects, capacity);
7749 size = kmalloc_size_roundup(size);
7750 capacity = (size - struct_size_t(struct slab_sheaf, objects, 0)) / sizeof(void *);
7751
7752 /*
7753 * Respect an explicit request for capacity, typically motivated by the
7754 * expected maximum size of kmem_cache_prefill_sheaf() requests, so they
7755 * do not end up using low-performance oversize sheaves.
7756 */
7757 return max(capacity, args->sheaf_capacity);
7758 }
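
/*
* Worked example of the kmalloc-bucket rounding above. The 32-byte sheaf
* header used below is a hypothetical figure (not the real size of struct
* slab_sheaf) and 8-byte pointers are assumed. For an object size of 2048
* the base capacity is 12:
*
*    size = 32 + 12 * 8 = 128, kmalloc_size_roundup(128) == 128
*    capacity = (128 - 32) / 8 == 12 (unchanged)
*
* while a base capacity of 26 with the same header gives 32 + 208 == 240,
* rounds up to 256 and ends up with capacity (256 - 32) / 8 == 28.
*/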
7759
7760 /*
7761 * calculate_sizes() determines the order and the distribution of data within
7762 * a slab object.
7763 */
7764 static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
7765 {
7766 slab_flags_t flags = s->flags;
7767 unsigned int size = s->object_size;
7768 unsigned int aligned_size;
7769 unsigned int order;
7770
7771 /*
7772 * Round up object size to the next word boundary. We can only
7773 * place the free pointer at word boundaries and this determines
7774 * the possible location of the free pointer.
7775 */
7776 size = ALIGN(size, sizeof(void *));
7777
7778 #ifdef CONFIG_SLUB_DEBUG
7779 /*
7780 * Determine if we can poison the object itself. If the user of
7781 * the slab may touch the object after free or before allocation
7782 * then we should never poison the object itself.
7783 */
7784 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
7785 !s->ctor)
7786 s->flags |= __OBJECT_POISON;
7787 else
7788 s->flags &= ~__OBJECT_POISON;
7789
7790
7791 /*
7792 * If we are Redzoning and there is no space between the end of the
7793 * object and the following fields, add one word so the right Redzone
7794 * is non-empty.
7795 */
7796 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
7797 size += sizeof(void *);
7798 #endif
7799
7800 /*
7801 * With that we have determined the number of bytes in actual use
7802 * by the object and redzoning.
7803 */
7804 s->inuse = size;
7805
7806 if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
7807 (flags & SLAB_POISON) ||
7808 (s->ctor && !args->use_freeptr_offset) ||
7809 ((flags & SLAB_RED_ZONE) &&
7810 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
7811 /*
7812 * Relocate free pointer after the object if it is not
7813 * permitted to overwrite the first word of the object on
7814 * kmem_cache_free.
7815 *
7816 * This is the case if we do RCU, have a constructor, are
7817 * poisoning the objects, or are redzoning an object smaller
7818 * than sizeof(void *) or are redzoning an object with
7819 * slub_debug_orig_size() enabled, in which case the right
7820 * redzone may be extended.
7821 *
7822 * The assumption that s->offset >= s->inuse means free
7823 * pointer is outside of the object is used in the
7824 * freeptr_outside_object() function. If that is no
7825 * longer true, the function needs to be modified.
7826 */
7827 s->offset = size;
7828 size += sizeof(void *);
7829 } else if (((flags & SLAB_TYPESAFE_BY_RCU) || s->ctor) &&
7830 args->use_freeptr_offset) {
7831 s->offset = args->freeptr_offset;
7832 } else {
7833 /*
7834 * Store freelist pointer near middle of object to keep
7835 * it away from the edges of the object to avoid small
7836 * sized over/underflows from neighboring allocations.
7837 */
7838 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
7839 }
7840
7841 #ifdef CONFIG_SLUB_DEBUG
7842 if (flags & SLAB_STORE_USER) {
7843 /*
7844 * Need to store information about allocs and frees after
7845 * the object.
7846 */
7847 size += 2 * sizeof(struct track);
7848
7849 /* Save the original kmalloc request size */
7850 if (flags & SLAB_KMALLOC)
7851 size += sizeof(unsigned long);
7852 }
7853 #endif
7854
7855 kasan_cache_create(s, &size, &s->flags);
7856 #ifdef CONFIG_SLUB_DEBUG
7857 if (flags & SLAB_RED_ZONE) {
7858 /*
7859 * Add some empty padding so that we can catch
7860 * overwrites from earlier objects rather than let
7861 * tracking information or the free pointer be
7862 * corrupted if a user writes before the start
7863 * of the object.
7864 */
7865 size += sizeof(void *);
7866
7867 s->red_left_pad = sizeof(void *);
7868 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
7869 size += s->red_left_pad;
7870 }
7871 #endif
7872
7873 /*
7874 * SLUB stores one object immediately after another beginning from
7875 * offset 0. In order to align the objects we have to simply size
7876 * each object to conform to the alignment.
7877 */
7878 aligned_size = ALIGN(size, s->align);
7879 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
7880 if (slab_args_unmergeable(args, s->flags) &&
7881 (aligned_size - size >= sizeof(struct slabobj_ext)))
7882 s->flags |= SLAB_OBJ_EXT_IN_OBJ;
7883 #endif
7884 size = aligned_size;
7885
7886 s->size = size;
7887 s->reciprocal_size = reciprocal_value(size);
7888 order = calculate_order(size);
7889
7890 if ((int)order < 0)
7891 return 0;
7892
7893 s->allocflags = __GFP_COMP;
7894
7895 if (s->flags & SLAB_CACHE_DMA)
7896 s->allocflags |= GFP_DMA;
7897
7898 if (s->flags & SLAB_CACHE_DMA32)
7899 s->allocflags |= GFP_DMA32;
7900
7901 if (s->flags & SLAB_RECLAIM_ACCOUNT)
7902 s->allocflags |= __GFP_RECLAIMABLE;
7903
7904 /*
7905 * For KMALLOC_NORMAL caches we enable sheaves later by
7906 * bootstrap_kmalloc_sheaves() to avoid recursion
7907 */
7908 if (!is_kmalloc_normal(s))
7909 s->sheaf_capacity = calculate_sheaf_capacity(s, args);
7910
7911 /*
7912 * Determine the number of objects per slab
7913 */
7914 s->oo = oo_make(order, size);
7915 s->min = oo_make(get_order(size), size);
7916
7917 return !!oo_objects(s->oo);
7918 }
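
/*
* Example of a resulting layout (illustrative; the numbers are hypothetical):
* a cache with object_size == 20, no debugging flags and no constructor on a
* 64-bit system:
*
*    size rounded to word boundary:  24 (== s->inuse)
*    free pointer:                   inside the object, at offset
*                                    ALIGN_DOWN(20 / 2, 8) == 8
*    s->size (align == 8):           24
*
* With a constructor or SLAB_TYPESAFE_BY_RCU the free pointer would instead
* be placed after the object, growing s->size to 32.
*/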
7919
7920 static void list_slab_objects(struct kmem_cache *s, struct slab *slab)
7921 {
7922 #ifdef CONFIG_SLUB_DEBUG
7923 void *addr = slab_address(slab);
7924 void *p;
7925
7926 if (!slab_add_kunit_errors())
7927 slab_bug(s, "Objects remaining on __kmem_cache_shutdown()");
7928
7929 spin_lock(&object_map_lock);
7930 __fill_map(object_map, s, slab);
7931
7932 for_each_object(p, s, addr, slab->objects) {
7933
7934 if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
7935 if (slab_add_kunit_errors())
7936 continue;
7937 pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
7938 print_tracking(s, p);
7939 }
7940 }
7941 spin_unlock(&object_map_lock);
7942
7943 __slab_err(slab);
7944 #endif
7945 }
7946
7947 /*
7948 * Attempt to free all partial slabs on a node.
7949 * This is called from __kmem_cache_shutdown(). We must take list_lock
7950 * because a sysfs file might still access the partial list during shutdown.
7951 */
7952 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
7953 {
7954 LIST_HEAD(discard);
7955 struct slab *slab, *h;
7956
7957 BUG_ON(irqs_disabled());
7958 spin_lock_irq(&n->list_lock);
7959 list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
7960 if (!slab->inuse) {
7961 remove_partial(n, slab);
7962 list_add(&slab->slab_list, &discard);
7963 } else {
7964 list_slab_objects(s, slab);
7965 }
7966 }
7967 spin_unlock_irq(&n->list_lock);
7968
7969 list_for_each_entry_safe(slab, h, &discard, slab_list)
7970 discard_slab(s, slab);
7971 }
7972
7973 bool __kmem_cache_empty(struct kmem_cache *s)
7974 {
7975 int node;
7976 struct kmem_cache_node *n;
7977
7978 for_each_kmem_cache_node(s, node, n)
7979 if (n->nr_partial || node_nr_slabs(n))
7980 return false;
7981 return true;
7982 }
7983
7984 /*
7985 * Release all resources used by a slab cache.
7986 */
7987 int __kmem_cache_shutdown(struct kmem_cache *s)
7988 {
7989 int node;
7990 struct kmem_cache_node *n;
7991
7992 flush_all_cpus_locked(s);
7993
7994 /* we might have rcu sheaves in flight */
7995 if (cache_has_sheaves(s))
7996 rcu_barrier();
7997
7998 for_each_node(node) {
7999 struct node_barn *barn = get_barn_node(s, node);
8000
8001 if (barn)
8002 barn_shrink(s, barn);
8003 }
8004
8005 /* Attempt to free all objects */
8006 for_each_kmem_cache_node(s, node, n) {
8007 free_partial(s, n);
8008 if (n->nr_partial || node_nr_slabs(n))
8009 return 1;
8010 }
8011 return 0;
8012 }
8013
8014 #ifdef CONFIG_PRINTK
8015 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
8016 {
8017 void *base;
8018 int __maybe_unused i;
8019 unsigned int objnr;
8020 void *objp;
8021 void *objp0;
8022 struct kmem_cache *s = slab->slab_cache;
8023 struct track __maybe_unused *trackp;
8024
8025 kpp->kp_ptr = object;
8026 kpp->kp_slab = slab;
8027 kpp->kp_slab_cache = s;
8028 base = slab_address(slab);
8029 objp0 = kasan_reset_tag(object);
8030 #ifdef CONFIG_SLUB_DEBUG
8031 objp = restore_red_left(s, objp0);
8032 #else
8033 objp = objp0;
8034 #endif
8035 objnr = obj_to_index(s, slab, objp);
8036 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
8037 objp = base + s->size * objnr;
8038 kpp->kp_objp = objp;
8039 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
8040 || (objp - base) % s->size) ||
8041 !(s->flags & SLAB_STORE_USER))
8042 return;
8043 #ifdef CONFIG_SLUB_DEBUG
8044 objp = fixup_red_left(s, objp);
8045 trackp = get_track(s, objp, TRACK_ALLOC);
8046 kpp->kp_ret = (void *)trackp->addr;
8047 #ifdef CONFIG_STACKDEPOT
8048 {
8049 depot_stack_handle_t handle;
8050 unsigned long *entries;
8051 unsigned int nr_entries;
8052
8053 handle = READ_ONCE(trackp->handle);
8054 if (handle) {
8055 nr_entries = stack_depot_fetch(handle, &entries);
8056 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
8057 kpp->kp_stack[i] = (void *)entries[i];
8058 }
8059
8060 trackp = get_track(s, objp, TRACK_FREE);
8061 handle = READ_ONCE(trackp->handle);
8062 if (handle) {
8063 nr_entries = stack_depot_fetch(handle, &entries);
8064 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
8065 kpp->kp_free_stack[i] = (void *)entries[i];
8066 }
8067 }
8068 #endif
8069 #endif
8070 }
8071 #endif
8072
8073 /********************************************************************
8074 * Kmalloc subsystem
8075 *******************************************************************/
8076
8077 static int __init setup_slub_min_order(const char *str, const struct kernel_param *kp)
8078 {
8079 int ret;
8080
8081 ret = kstrtouint(str, 0, &slub_min_order);
8082 if (ret)
8083 return ret;
8084
8085 if (slub_min_order > slub_max_order)
8086 slub_max_order = slub_min_order;
8087
8088 return 0;
8089 }
8090
8091 static const struct kernel_param_ops param_ops_slab_min_order __initconst = {
8092 .set = setup_slub_min_order,
8093 };
8094 __core_param_cb(slab_min_order, &param_ops_slab_min_order, &slub_min_order, 0);
8095 __core_param_cb(slub_min_order, &param_ops_slab_min_order, &slub_min_order, 0);
8096
8097 static int __init setup_slub_max_order(const char *str, const struct kernel_param *kp)
8098 {
8099 int ret;
8100
8101 ret = kstrtouint(str, 0, &slub_max_order);
8102 if (ret)
8103 return ret;
8104
8105 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
8106
8107 if (slub_min_order > slub_max_order)
8108 slub_min_order = slub_max_order;
8109
8110 return 0;
8111 }
8112
8113 static const struct kernel_param_ops param_ops_slab_max_order __initconst = {
8114 .set = setup_slub_max_order,
8115 };
8116 __core_param_cb(slab_max_order, &param_ops_slab_max_order, &slub_max_order, 0);
8117 __core_param_cb(slub_max_order, &param_ops_slab_max_order, &slub_max_order, 0);
8118
8119 core_param(slab_min_objects, slub_min_objects, uint, 0);
8120 core_param(slub_min_objects, slub_min_objects, uint, 0);
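
/*
* The tunables above are boot parameters. An illustrative command line
* (the values are hypothetical) raising the slab page order and the
* minimum object count:
*
*    slab_min_order=1 slab_max_order=4 slab_min_objects=16
*
* The legacy "slub_"-prefixed spellings registered above are accepted as
* aliases.
*/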
8121
8122 #ifdef CONFIG_NUMA
8123 static int __init setup_slab_strict_numa(const char *str, const struct kernel_param *kp)
8124 {
8125 if (nr_node_ids > 1) {
8126 static_branch_enable(&strict_numa);
8127 pr_info("SLUB: Strict NUMA enabled.\n");
8128 } else {
8129 pr_warn("slab_strict_numa parameter set on non NUMA system.\n");
8130 }
8131
8132 return 0;
8133 }
8134
8135 static const struct kernel_param_ops param_ops_slab_strict_numa __initconst = {
8136 .flags = KERNEL_PARAM_OPS_FL_NOARG,
8137 .set = setup_slab_strict_numa,
8138 };
8139 __core_param_cb(slab_strict_numa, &param_ops_slab_strict_numa, NULL, 0);
8140 #endif
8141
8142
8143 #ifdef CONFIG_HARDENED_USERCOPY
8144 /*
8145 * Rejects incorrectly sized objects and objects that are to be copied
8146 * to/from userspace but do not fall entirely within the containing slab
8147 * cache's usercopy region.
8148 *
8149 * Returns normally if the check passes; otherwise the copy is rejected
8150 * via usercopy_abort().
8151 */
8152 void __check_heap_object(const void *ptr, unsigned long n,
8153 const struct slab *slab, bool to_user)
8154 {
8155 struct kmem_cache *s;
8156 unsigned int offset;
8157 bool is_kfence = is_kfence_address(ptr);
8158
8159 ptr = kasan_reset_tag(ptr);
8160
8161 /* Find object and usable object size. */
8162 s = slab->slab_cache;
8163
8164 /* Reject impossible pointers. */
8165 if (ptr < slab_address(slab))
8166 usercopy_abort("SLUB object not in SLUB page?!", NULL,
8167 to_user, 0, n);
8168
8169 /* Find offset within object. */
8170 if (is_kfence)
8171 offset = ptr - kfence_object_start(ptr);
8172 else
8173 offset = (ptr - slab_address(slab)) % s->size;
8174
8175 /* Adjust for redzone and reject if within the redzone. */
8176 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
8177 if (offset < s->red_left_pad)
8178 usercopy_abort("SLUB object in left red zone",
8179 s->name, to_user, offset, n);
8180 offset -= s->red_left_pad;
8181 }
8182
8183 /* Allow address range falling entirely within usercopy region. */
8184 if (offset >= s->useroffset &&
8185 offset - s->useroffset <= s->usersize &&
8186 n <= s->useroffset - offset + s->usersize)
8187 return;
8188
8189 usercopy_abort("SLUB object", s->name, to_user, offset, n);
8190 }
8191 #endif /* CONFIG_HARDENED_USERCOPY */
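
/*
* Worked example of the usercopy window check above (the numbers are
* hypothetical): a cache with s->size == 128, s->useroffset == 16 and
* s->usersize == 32. A copy starting at object offset 24 with length 8
* lies fully inside the [16, 48) window and is allowed; a copy at offset
* 24 with length 40 would extend to offset 64 and triggers
* usercopy_abort().
*/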
8192
8193 #define SHRINK_PROMOTE_MAX 32
8194
8195 /*
8196 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
8197 * up most to the head of the partial lists. New allocations will then
8198 * fill those up and thus they can be removed from the partial lists.
8199 *
8200 * The slabs with the least items are placed last. This results in them
8201 * being allocated from last, increasing the chance that the last objects
8202 * are freed in them.
8203 */
8204 static int __kmem_cache_do_shrink(struct kmem_cache *s)
8205 {
8206 int node;
8207 int i;
8208 struct kmem_cache_node *n;
8209 struct slab *slab;
8210 struct slab *t;
8211 struct list_head discard;
8212 struct list_head promote[SHRINK_PROMOTE_MAX];
8213 unsigned long flags;
8214 int ret = 0;
8215
8216 for_each_node(node) {
8217 struct node_barn *barn = get_barn_node(s, node);
8218
8219 if (barn)
8220 barn_shrink(s, barn);
8221 }
8222
8223 for_each_kmem_cache_node(s, node, n) {
8224 INIT_LIST_HEAD(&discard);
8225 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
8226 INIT_LIST_HEAD(promote + i);
8227
8228 spin_lock_irqsave(&n->list_lock, flags);
8229
8230 /*
8231 * Build lists of slabs to discard or promote.
8232 *
8233 * Note that concurrent frees may occur while we hold the
8234 * list_lock. slab->inuse here is the upper limit.
8235 */
8236 list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
8237 int free = slab->objects - slab->inuse;
8238
8239 /* Do not reread slab->inuse */
8240 barrier();
8241
8242 /* We do not keep full slabs on the list */
8243 BUG_ON(free <= 0);
8244
8245 if (free == slab->objects) {
8246 list_move(&slab->slab_list, &discard);
8247 slab_clear_node_partial(slab);
8248 n->nr_partial--;
8249 dec_slabs_node(s, node, slab->objects);
8250 } else if (free <= SHRINK_PROMOTE_MAX)
8251 list_move(&slab->slab_list, promote + free - 1);
8252 }
8253
8254 /*
8255 * Promote the slabs filled up most to the head of the
8256 * partial list.
8257 */
8258 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
8259 list_splice(promote + i, &n->partial);
8260
8261 spin_unlock_irqrestore(&n->list_lock, flags);
8262
8263 /* Release empty slabs */
8264 list_for_each_entry_safe(slab, t, &discard, slab_list)
8265 free_slab(s, slab);
8266
8267 if (node_nr_slabs(n))
8268 ret = 1;
8269 }
8270
8271 return ret;
8272 }
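
/*
* Example of the promotion step above (hypothetical slab states): a slab
* with one free object is moved to promote[0], one with three free objects
* to promote[2], and a completely free slab to the discard list. Splicing
* from promote[SHRINK_PROMOTE_MAX - 1] down to promote[0] onto the head of
* n->partial leaves the almost-full slabs closest to the head, so they are
* refilled (and thus removed from the partial list) first.
*/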
8273
8274 int __kmem_cache_shrink(struct kmem_cache *s)
8275 {
8276 flush_all(s);
8277 return __kmem_cache_do_shrink(s);
8278 }
8279
8280 static int slab_mem_going_offline_callback(void)
8281 {
8282 struct kmem_cache *s;
8283
8284 mutex_lock(&slab_mutex);
8285 list_for_each_entry(s, &slab_caches, list) {
8286 flush_all_cpus_locked(s);
8287 __kmem_cache_do_shrink(s);
8288 }
8289 mutex_unlock(&slab_mutex);
8290
8291 return 0;
8292 }
8293
8294 static int slab_mem_going_online_callback(int nid)
8295 {
8296 struct kmem_cache_node *n;
8297 struct kmem_cache *s;
8298 int ret = 0;
8299
8300 /*
8301 * We are bringing a node online. No memory is available yet. We must
8302 * allocate a kmem_cache_node structure in order to bring the node
8303 * online.
8304 */
8305 mutex_lock(&slab_mutex);
8306 list_for_each_entry(s, &slab_caches, list) {
8307 struct node_barn *barn = NULL;
8308
8309 /*
8310 * The structure may already exist if the node was previously
8311 * onlined and offlined.
8312 */
8313 if (get_node(s, nid))
8314 continue;
8315
8316 if (cache_has_sheaves(s) && !get_barn_node(s, nid)) {
8317
8318 barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, nid);
8319
8320 if (!barn) {
8321 ret = -ENOMEM;
8322 goto out;
8323 }
8324 }
8325
8326 /*
8327 * XXX: kmem_cache_alloc_node will fall back to other nodes
8328 * since memory is not yet available from the node that
8329 * is brought up.
8330 */
8331 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
8332 if (!n) {
8333 kfree(barn);
8334 ret = -ENOMEM;
8335 goto out;
8336 }
8337
8338 init_kmem_cache_node(n);
8339 s->per_node[nid].node = n;
8340
8341 if (barn) {
8342 barn_init(barn);
8343 s->per_node[nid].barn = barn;
8344 }
8345 }
8346 /*
8347 * Any cache created after this point will also have kmem_cache_node
8348 * and barn initialized for the new node.
8349 */
8350 node_set(nid, slab_nodes);
8351 node_set(nid, slab_barn_nodes);
8352 out:
8353 mutex_unlock(&slab_mutex);
8354 return ret;
8355 }
8356
8357 static int slab_memory_callback(struct notifier_block *self,
8358 unsigned long action, void *arg)
8359 {
8360 struct node_notify *nn = arg;
8361 int nid = nn->nid;
8362 int ret = 0;
8363
8364 switch (action) {
8365 case NODE_ADDING_FIRST_MEMORY:
8366 ret = slab_mem_going_online_callback(nid);
8367 break;
8368 case NODE_REMOVING_LAST_MEMORY:
8369 ret = slab_mem_going_offline_callback();
8370 break;
8371 }
8372 if (ret)
8373 ret = notifier_from_errno(ret);
8374 else
8375 ret = NOTIFY_OK;
8376 return ret;
8377 }
8378
8379 /********************************************************************
8380 * Basic setup of slabs
8381 *******************************************************************/
8382
8383 /*
8384 * Used for early kmem_cache structures that were allocated using
8385 * the page allocator. Allocate them properly then fix up the pointers
8386 * that may be pointing to the wrong kmem_cache structure.
8387 */
8388
8389 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
8390 {
8391 int node;
8392 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
8393 struct kmem_cache_node *n;
8394
8395 memcpy(s, static_cache, kmem_cache->object_size);
8396
8397 for_each_kmem_cache_node(s, node, n) {
8398 struct slab *p;
8399
8400 list_for_each_entry(p, &n->partial, slab_list)
8401 p->slab_cache = s;
8402
8403 #ifdef CONFIG_SLUB_DEBUG
8404 list_for_each_entry(p, &n->full, slab_list)
8405 p->slab_cache = s;
8406 #endif
8407 }
8408 list_add(&s->list, &slab_caches);
8409 return s;
8410 }
8411
8412 /*
8413 * Finish the sheaves initialization done normally by init_percpu_sheaves() and
8414 * init_kmem_cache_nodes(). For normal kmalloc caches we have to bootstrap it
8415 * since sheaves and barns are allocated by kmalloc.
8416 */
8417 static void __init bootstrap_cache_sheaves(struct kmem_cache *s)
8418 {
8419 struct kmem_cache_args empty_args = {};
8420 unsigned int capacity;
8421 bool failed = false;
8422 int node, cpu;
8423
8424 capacity = calculate_sheaf_capacity(s, &empty_args);
8425
8426 /* capacity can be 0 due to debugging or SLUB_TINY */
8427 if (!capacity)
8428 return;
8429
8430 for_each_node_mask(node, slab_barn_nodes) {
8431 struct node_barn *barn;
8432
8433 barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
8434
8435 if (!barn) {
8436 failed = true;
8437 goto out;
8438 }
8439
8440 barn_init(barn);
8441 s->per_node[node].barn = barn;
8442 }
8443
8444 for_each_possible_cpu(cpu) {
8445 struct slub_percpu_sheaves *pcs;
8446
8447 pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
8448
8449 pcs->main = __alloc_empty_sheaf(s, GFP_KERNEL, capacity);
8450
8451 if (!pcs->main) {
8452 failed = true;
8453 break;
8454 }
8455 }
8456
8457 out:
8458 /*
8459 * It's still early in boot, so treat this the same as a failure to
8460 * create the kmalloc cache in the first place.
8461 */
8462 if (failed)
8463 panic("Out of memory when creating kmem_cache %s\n", s->name);
8464
8465 s->sheaf_capacity = capacity;
8466 }
8467
8468 static void __init bootstrap_kmalloc_sheaves(void)
8469 {
8470 enum kmalloc_cache_type type;
8471
8472 for (type = KMALLOC_NORMAL; type <= KMALLOC_RANDOM_END; type++) {
8473 for (int idx = 0; idx < KMALLOC_SHIFT_HIGH + 1; idx++) {
8474 if (kmalloc_caches[type][idx])
8475 bootstrap_cache_sheaves(kmalloc_caches[type][idx]);
8476 }
8477 }
8478 }
8479
8480 void __init kmem_cache_init(void)
8481 {
8482 static __initdata struct kmem_cache boot_kmem_cache,
8483 boot_kmem_cache_node;
8484 int node;
8485
8486 if (debug_guardpage_minorder())
8487 slub_max_order = 0;
8488
8489 /* Inform pointer hashing choice about slub debugging state. */
8490 hash_pointers_finalize(__slub_debug_enabled());
8491
8492 kmem_cache_node = &boot_kmem_cache_node;
8493 kmem_cache = &boot_kmem_cache;
8494
8495 /*
8496 * Initialize the nodemask for which we will allocate per node
8497 * structures. Here we don't need to take slab_mutex yet.
8498 */
8499 for_each_node_state(node, N_MEMORY)
8500 node_set(node, slab_nodes);
8501
8502 for_each_online_node(node)
8503 node_set(node, slab_barn_nodes);
8504
8505 create_boot_cache(kmem_cache_node, "kmem_cache_node",
8506 sizeof(struct kmem_cache_node),
8507 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
8508
8509 hotplug_node_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
8510
8511 /* Able to allocate the per node structures */
8512 slab_state = PARTIAL;
8513
8514 create_boot_cache(kmem_cache, "kmem_cache",
8515 offsetof(struct kmem_cache, per_node) +
8516 nr_node_ids * sizeof(struct kmem_cache_per_node_ptrs),
8517 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
8518
8519 kmem_cache = bootstrap(&boot_kmem_cache);
8520 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
8521
8522 /* Now we can use the kmem_cache to allocate kmalloc slabs */
8523 setup_kmalloc_cache_index_table();
8524 create_kmalloc_caches();
8525
8526 bootstrap_kmalloc_sheaves();
8527
8528 /* Setup random freelists for each cache */
8529 init_freelist_randomization();
8530
8531 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", slub_cpu_setup,
8532 slub_cpu_dead);
8533
8534 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
8535 cache_line_size(),
8536 slub_min_order, slub_max_order, slub_min_objects,
8537 nr_cpu_ids, nr_node_ids);
8538 }
8539
8540 void __init kmem_cache_init_late(void)
8541 {
8542 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM | WQ_PERCPU,
8543 0);
8544 WARN_ON(!flushwq);
8545 #ifdef CONFIG_SLAB_FREELIST_RANDOM
8546 prandom_init_once(&slab_rnd_state);
8547 #endif
8548 }
8549
8550 int do_kmem_cache_create(struct kmem_cache *s, const char *name,
8551 unsigned int size, struct kmem_cache_args *args,
8552 slab_flags_t flags)
8553 {
8554 int err = -EINVAL;
8555
8556 s->name = name;
8557 s->size = s->object_size = size;
8558
8559 s->flags = kmem_cache_flags(flags, s->name);
8560 #ifdef CONFIG_SLAB_FREELIST_HARDENED
8561 s->random = get_random_long();
8562 #endif
8563 s->align = args->align;
8564 s->ctor = args->ctor;
8565 #ifdef CONFIG_HARDENED_USERCOPY
8566 s->useroffset = args->useroffset;
8567 s->usersize = args->usersize;
8568 #endif
8569
8570 if (!calculate_sizes(args, s))
8571 goto out;
8572 if (disable_higher_order_debug) {
8573 /*
8574 * Disable debugging flags that store metadata if the min slab
8575 * order increased.
8576 */
8577 if (get_order(s->size) > get_order(s->object_size)) {
8578 s->flags &= ~DEBUG_METADATA_FLAGS;
8579 s->offset = 0;
8580 if (!calculate_sizes(args, s))
8581 goto out;
8582 }
8583 }
8584
8585 #ifdef system_has_freelist_aba
8586 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
8587 /* Enable fast mode */
8588 s->flags |= __CMPXCHG_DOUBLE;
8589 }
8590 #endif
8591
8592 /*
8593 * The larger the object size is, the more slabs we want on the partial
8594 * list to avoid pounding the page allocator excessively.
8595 */
8596 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
8597 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
8598
8599 s->cpu_sheaves = alloc_percpu(struct slub_percpu_sheaves);
8600 if (!s->cpu_sheaves) {
8601 err = -ENOMEM;
8602 goto out;
8603 }
8604
8605 #ifdef CONFIG_NUMA
8606 s->remote_node_defrag_ratio = 1000;
8607 #endif
8608
8609 /* Initialize the pre-computed randomized freelist if slab is up */
8610 if (slab_state >= UP) {
8611 if (init_cache_random_seq(s))
8612 goto out;
8613 }
8614
8615 if (!init_kmem_cache_nodes(s))
8616 goto out;
8617
8618 #ifdef CONFIG_SLUB_STATS
8619 if (!alloc_kmem_cache_stats(s))
8620 goto out;
8621 #endif
8622
8623 err = init_percpu_sheaves(s);
8624 if (err)
8625 goto out;
8626
8627 err = 0;
8628
8629 /* Mutex is not taken during early boot */
8630 if (slab_state <= UP)
8631 goto out;
8632
8633 /*
8634 * Failing to create sysfs files is not critical to SLUB functionality.
8635 * If it fails, proceed with cache creation without these files.
8636 */
8637 if (sysfs_slab_add(s))
8638 pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name);
8639
8640 if (s->flags & SLAB_STORE_USER)
8641 debugfs_slab_add(s);
8642
8643 out:
8644 if (err)
8645 __kmem_cache_release(s);
8646 return err;
8647 }
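
/*
* Illustrative caller-side sketch of creating a cache that ends up in this
* function (the structure name and values are hypothetical):
*
*    struct kmem_cache_args args = {
*        .align          = 64,
*        .sheaf_capacity = 32,
*    };
*    struct kmem_cache *c;
*
*    c = kmem_cache_create("my_cache", sizeof(struct my_obj), &args,
*                          SLAB_HWCACHE_ALIGN);
*    if (!c)
*        return -ENOMEM;
*
* The requested sheaf_capacity is only a lower bound; see
* calculate_sheaf_capacity() above.
*/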
8648
8649 #ifdef SLAB_SUPPORTS_SYSFS
8650 static int count_inuse(struct slab *slab)
8651 {
8652 return slab->inuse;
8653 }
8654
8655 static int count_total(struct slab *slab)
8656 {
8657 return slab->objects;
8658 }
8659 #endif
8660
8661 #ifdef CONFIG_SLUB_DEBUG
8662 static void validate_slab(struct kmem_cache *s, struct slab *slab,
8663 unsigned long *obj_map)
8664 {
8665 void *p;
8666 void *addr = slab_address(slab);
8667
8668 if (!validate_slab_ptr(slab)) {
8669 slab_err(s, slab, "Not a valid slab page");
8670 return;
8671 }
8672
8673 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
8674 return;
8675
8676 /* Now we know that a valid freelist exists */
8677 __fill_map(obj_map, s, slab);
8678 for_each_object(p, s, addr, slab->objects) {
8679 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
8680 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
8681
8682 if (!check_object(s, slab, p, val))
8683 break;
8684 }
8685 }
8686
8687 static int validate_slab_node(struct kmem_cache *s,
8688 struct kmem_cache_node *n, unsigned long *obj_map)
8689 {
8690 unsigned long count = 0;
8691 struct slab *slab;
8692 unsigned long flags;
8693
8694 spin_lock_irqsave(&n->list_lock, flags);
8695
8696 list_for_each_entry(slab, &n->partial, slab_list) {
8697 validate_slab(s, slab, obj_map);
8698 count++;
8699 }
8700 if (count != n->nr_partial) {
8701 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
8702 s->name, count, n->nr_partial);
8703 slab_add_kunit_errors();
8704 }
8705
8706 if (!(s->flags & SLAB_STORE_USER))
8707 goto out;
8708
8709 list_for_each_entry(slab, &n->full, slab_list) {
8710 validate_slab(s, slab, obj_map);
8711 count++;
8712 }
8713 if (count != node_nr_slabs(n)) {
8714 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
8715 s->name, count, node_nr_slabs(n));
8716 slab_add_kunit_errors();
8717 }
8718
8719 out:
8720 spin_unlock_irqrestore(&n->list_lock, flags);
8721 return count;
8722 }
8723
8724 long validate_slab_cache(struct kmem_cache *s)
8725 {
8726 int node;
8727 unsigned long count = 0;
8728 struct kmem_cache_node *n;
8729 unsigned long *obj_map;
8730
8731 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
8732 if (!obj_map)
8733 return -ENOMEM;
8734
8735 flush_all(s);
8736 for_each_kmem_cache_node(s, node, n)
8737 count += validate_slab_node(s, n, obj_map);
8738
8739 bitmap_free(obj_map);
8740
8741 return count;
8742 }
8743 EXPORT_SYMBOL(validate_slab_cache);
8744
8745 #ifdef CONFIG_DEBUG_FS
8746 /*
8747 * Generate lists of code addresses where slabcache objects are allocated
8748 * and freed.
8749 */
8750
8751 struct location {
8752 depot_stack_handle_t handle;
8753 unsigned long count;
8754 unsigned long addr;
8755 unsigned long waste;
8756 long long sum_time;
8757 long min_time;
8758 long max_time;
8759 long min_pid;
8760 long max_pid;
8761 DECLARE_BITMAP(cpus, NR_CPUS);
8762 nodemask_t nodes;
8763 };
8764
8765 struct loc_track {
8766 unsigned long max;
8767 unsigned long count;
8768 struct location *loc;
8769 loff_t idx;
8770 };
8771
8772 static struct dentry *slab_debugfs_root;
8773
8774 static void free_loc_track(struct loc_track *t)
8775 {
8776 if (t->max)
8777 free_pages((unsigned long)t->loc,
8778 get_order(sizeof(struct location) * t->max));
8779 }
8780
8781 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
8782 {
8783 struct location *l;
8784 int order;
8785
8786 order = get_order(sizeof(struct location) * max);
8787
8788 l = (void *)__get_free_pages(flags, order);
8789 if (!l)
8790 return 0;
8791
8792 if (t->count) {
8793 memcpy(l, t->loc, sizeof(struct location) * t->count);
8794 free_loc_track(t);
8795 }
8796 t->max = max;
8797 t->loc = l;
8798 return 1;
8799 }
8800
8801 static int add_location(struct loc_track *t, struct kmem_cache *s,
8802 const struct track *track,
8803 unsigned int orig_size)
8804 {
8805 long start, end, pos;
8806 struct location *l;
8807 unsigned long caddr, chandle, cwaste;
8808 unsigned long age = jiffies - track->when;
8809 depot_stack_handle_t handle = 0;
8810 unsigned int waste = s->object_size - orig_size;
8811
8812 #ifdef CONFIG_STACKDEPOT
8813 handle = READ_ONCE(track->handle);
8814 #endif
8815 start = -1;
8816 end = t->count;
8817
8818 for ( ; ; ) {
8819 pos = start + (end - start + 1) / 2;
8820
8821 /*
8822 * There is nothing at "end". If we end up there,
8823 * we need to insert the new element before it.
8824 */
8825 if (pos == end)
8826 break;
8827
8828 l = &t->loc[pos];
8829 caddr = l->addr;
8830 chandle = l->handle;
8831 cwaste = l->waste;
8832 if ((track->addr == caddr) && (handle == chandle) &&
8833 (waste == cwaste)) {
8834
8835 l->count++;
8836 if (track->when) {
8837 l->sum_time += age;
8838 if (age < l->min_time)
8839 l->min_time = age;
8840 if (age > l->max_time)
8841 l->max_time = age;
8842
8843 if (track->pid < l->min_pid)
8844 l->min_pid = track->pid;
8845 if (track->pid > l->max_pid)
8846 l->max_pid = track->pid;
8847
8848 cpumask_set_cpu(track->cpu,
8849 to_cpumask(l->cpus));
8850 }
8851 node_set(page_to_nid(virt_to_page(track)), l->nodes);
8852 return 1;
8853 }
8854
8855 if (track->addr < caddr)
8856 end = pos;
8857 else if (track->addr == caddr && handle < chandle)
8858 end = pos;
8859 else if (track->addr == caddr && handle == chandle &&
8860 waste < cwaste)
8861 end = pos;
8862 else
8863 start = pos;
8864 }
8865
8866 /*
8867 * Not found. Insert new tracking element.
8868 */
8869 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
8870 return 0;
8871
8872 l = t->loc + pos;
8873 if (pos < t->count)
8874 memmove(l + 1, l,
8875 (t->count - pos) * sizeof(struct location));
8876 t->count++;
8877 l->count = 1;
8878 l->addr = track->addr;
8879 l->sum_time = age;
8880 l->min_time = age;
8881 l->max_time = age;
8882 l->min_pid = track->pid;
8883 l->max_pid = track->pid;
8884 l->handle = handle;
8885 l->waste = waste;
8886 cpumask_clear(to_cpumask(l->cpus));
8887 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
8888 nodes_clear(l->nodes);
8889 node_set(page_to_nid(virt_to_page(track)), l->nodes);
8890 return 1;
8891 }
8892
8893 static void process_slab(struct loc_track *t, struct kmem_cache *s,
8894 struct slab *slab, enum track_item alloc,
8895 unsigned long *obj_map)
8896 {
8897 void *addr = slab_address(slab);
8898 bool is_alloc = (alloc == TRACK_ALLOC);
8899 void *p;
8900
8901 __fill_map(obj_map, s, slab);
8902
8903 for_each_object(p, s, addr, slab->objects)
8904 if (!test_bit(__obj_to_index(s, addr, p), obj_map))
8905 add_location(t, s, get_track(s, p, alloc),
8906 is_alloc ? get_orig_size(s, p) :
8907 s->object_size);
8908 }
8909 #endif /* CONFIG_DEBUG_FS */
8910 #endif /* CONFIG_SLUB_DEBUG */
8911
8912 #ifdef SLAB_SUPPORTS_SYSFS
8913 enum slab_stat_type {
8914 SL_ALL, /* All slabs */
8915 SL_PARTIAL, /* Only partially allocated slabs */
8916 SL_CPU, /* Only slabs used for cpu caches */
8917 SL_OBJECTS, /* Determine allocated objects not slabs */
8918 SL_TOTAL /* Determine object capacity not slabs */
8919 };
8920
8921 #define SO_ALL (1 << SL_ALL)
8922 #define SO_PARTIAL (1 << SL_PARTIAL)
8923 #define SO_CPU (1 << SL_CPU)
8924 #define SO_OBJECTS (1 << SL_OBJECTS)
8925 #define SO_TOTAL (1 << SL_TOTAL)
8926
8927 static ssize_t show_slab_objects(struct kmem_cache *s,
8928 char *buf, unsigned long flags)
8929 {
8930 unsigned long total = 0;
8931 int node;
8932 int x;
8933 unsigned long *nodes;
8934 int len = 0;
8935
8936 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
8937 if (!nodes)
8938 return -ENOMEM;
8939
8940 /*
8941 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
8942 * already held which will conflict with an existing lock order:
8943 *
8944 * mem_hotplug_lock->slab_mutex->kernfs_mutex
8945 *
8946 * We don't really need mem_hotplug_lock (to hold off
8947 * slab_mem_going_offline_callback) here because slab's memory hot
8948 * unplug code doesn't destroy the kmem_cache->node[] data.
8949 */
8950
8951 #ifdef CONFIG_SLUB_DEBUG
8952 if (flags & SO_ALL) {
8953 struct kmem_cache_node *n;
8954
8955 for_each_kmem_cache_node(s, node, n) {
8956
8957 if (flags & SO_TOTAL)
8958 x = node_nr_objs(n);
8959 else if (flags & SO_OBJECTS)
8960 x = node_nr_objs(n) - count_partial(n, count_free);
8961 else
8962 x = node_nr_slabs(n);
8963 total += x;
8964 nodes[node] += x;
8965 }
8966
8967 } else
8968 #endif
8969 if (flags & SO_PARTIAL) {
8970 struct kmem_cache_node *n;
8971
8972 for_each_kmem_cache_node(s, node, n) {
8973 if (flags & SO_TOTAL)
8974 x = count_partial(n, count_total);
8975 else if (flags & SO_OBJECTS)
8976 x = count_partial(n, count_inuse);
8977 else
8978 x = n->nr_partial;
8979 total += x;
8980 nodes[node] += x;
8981 }
8982 }
8983
8984 len += sysfs_emit_at(buf, len, "%lu", total);
8985 #ifdef CONFIG_NUMA
8986 for (node = 0; node < nr_node_ids; node++) {
8987 if (nodes[node])
8988 len += sysfs_emit_at(buf, len, " N%d=%lu",
8989 node, nodes[node]);
8990 }
8991 #endif
8992 len += sysfs_emit_at(buf, len, "\n");
8993 kfree(nodes);
8994
8995 return len;
8996 }
8997
8998 #define to_slab_attr(n) container_of_const(n, struct slab_attribute, attr)
8999 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
9000
9001 struct slab_attribute {
9002 struct attribute attr;
9003 ssize_t (*show)(struct kmem_cache *s, char *buf);
9004 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
9005 };
9006
9007 #define SLAB_ATTR_RO(_name) \
9008 static const struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
9009
9010 #define SLAB_ATTR(_name) \
9011 static const struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
9012
9013 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
9014 {
9015 return sysfs_emit(buf, "%u\n", s->size);
9016 }
9017 SLAB_ATTR_RO(slab_size);
9018
9019 static ssize_t align_show(struct kmem_cache *s, char *buf)
9020 {
9021 return sysfs_emit(buf, "%u\n", s->align);
9022 }
9023 SLAB_ATTR_RO(align);
9024
9025 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
9026 {
9027 return sysfs_emit(buf, "%u\n", s->object_size);
9028 }
9029 SLAB_ATTR_RO(object_size);
9030
9031 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
9032 {
9033 return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
9034 }
9035 SLAB_ATTR_RO(objs_per_slab);
9036
9037 static ssize_t order_show(struct kmem_cache *s, char *buf)
9038 {
9039 return sysfs_emit(buf, "%u\n", oo_order(s->oo));
9040 }
9041 SLAB_ATTR_RO(order);
9042
9043 static ssize_t sheaf_capacity_show(struct kmem_cache *s, char *buf)
9044 {
9045 return sysfs_emit(buf, "%u\n", s->sheaf_capacity);
9046 }
9047 SLAB_ATTR_RO(sheaf_capacity);
9048
9049 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
9050 {
9051 return sysfs_emit(buf, "%lu\n", s->min_partial);
9052 }
9053
9054 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
9055 size_t length)
9056 {
9057 unsigned long min;
9058 int err;
9059
9060 err = kstrtoul(buf, 10, &min);
9061 if (err)
9062 return err;
9063
9064 s->min_partial = min;
9065 return length;
9066 }
9067 SLAB_ATTR(min_partial);
9068
9069 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
9070 {
9071 return sysfs_emit(buf, "0\n");
9072 }
9073
9074 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
9075 size_t length)
9076 {
9077 unsigned int objects;
9078 int err;
9079
9080 err = kstrtouint(buf, 10, &objects);
9081 if (err)
9082 return err;
9083 if (objects)
9084 return -EINVAL;
9085
9086 return length;
9087 }
9088 SLAB_ATTR(cpu_partial);
9089
9090 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
9091 {
9092 if (!s->ctor)
9093 return 0;
9094 return sysfs_emit(buf, "%pS\n", s->ctor);
9095 }
9096 SLAB_ATTR_RO(ctor);
9097
9098 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
9099 {
9100 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
9101 }
9102 SLAB_ATTR_RO(aliases);
9103
9104 static ssize_t partial_show(struct kmem_cache *s, char *buf)
9105 {
9106 return show_slab_objects(s, buf, SO_PARTIAL);
9107 }
9108 SLAB_ATTR_RO(partial);
9109
9110 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
9111 {
9112 return show_slab_objects(s, buf, SO_CPU);
9113 }
9114 SLAB_ATTR_RO(cpu_slabs);
9115
9116 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
9117 {
9118 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
9119 }
9120 SLAB_ATTR_RO(objects_partial);
9121
9122 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
9123 {
9124 return sysfs_emit(buf, "0(0)\n");
9125 }
9126 SLAB_ATTR_RO(slabs_cpu_partial);
9127
9128 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
9129 {
9130 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
9131 }
9132 SLAB_ATTR_RO(reclaim_account);
9133
9134 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
9135 {
9136 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
9137 }
9138 SLAB_ATTR_RO(hwcache_align);
9139
9140 #ifdef CONFIG_ZONE_DMA
9141 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
9142 {
9143 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
9144 }
9145 SLAB_ATTR_RO(cache_dma);
9146 #endif
9147
9148 #ifdef CONFIG_HARDENED_USERCOPY
9149 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
9150 {
9151 return sysfs_emit(buf, "%u\n", s->usersize);
9152 }
9153 SLAB_ATTR_RO(usersize);
9154 #endif
9155
9156 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
9157 {
9158 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
9159 }
9160 SLAB_ATTR_RO(destroy_by_rcu);
9161
9162 #ifdef CONFIG_SLUB_DEBUG
9163 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
9164 {
9165 return show_slab_objects(s, buf, SO_ALL);
9166 }
9167 SLAB_ATTR_RO(slabs);
9168
9169 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
9170 {
9171 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
9172 }
9173 SLAB_ATTR_RO(total_objects);
9174
9175 static ssize_t objects_show(struct kmem_cache *s, char *buf)
9176 {
9177 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
9178 }
9179 SLAB_ATTR_RO(objects);
9180
9181 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
9182 {
9183 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
9184 }
9185 SLAB_ATTR_RO(sanity_checks);
9186
9187 static ssize_t trace_show(struct kmem_cache *s, char *buf)
9188 {
9189 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
9190 }
9191 SLAB_ATTR_RO(trace);
9192
9193 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
9194 {
9195 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
9196 }
9197
9198 SLAB_ATTR_RO(red_zone);
9199
9200 static ssize_t poison_show(struct kmem_cache *s, char *buf)
9201 {
9202 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
9203 }
9204
9205 SLAB_ATTR_RO(poison);
9206
9207 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
9208 {
9209 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
9210 }
9211
9212 SLAB_ATTR_RO(store_user);
9213
9214 static ssize_t validate_show(struct kmem_cache *s, char *buf)
9215 {
9216 return 0;
9217 }
9218
9219 static ssize_t validate_store(struct kmem_cache *s,
9220 const char *buf, size_t length)
9221 {
9222 int ret = -EINVAL;
9223
9224 if (buf[0] == '1' && kmem_cache_debug(s)) {
9225 ret = validate_slab_cache(s);
9226 if (ret >= 0)
9227 ret = length;
9228 }
9229 return ret;
9230 }
9231 SLAB_ATTR(validate);
9232
9233 #endif /* CONFIG_SLUB_DEBUG */
9234
9235 #ifdef CONFIG_FAILSLAB
9236 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
9237 {
9238 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
9239 }
9240
9241 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
9242 size_t length)
9243 {
9244 if (s->refcount > 1)
9245 return -EINVAL;
9246
9247 if (buf[0] == '1')
9248 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
9249 else
9250 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
9251
9252 return length;
9253 }
9254 SLAB_ATTR(failslab);
9255 #endif
9256
9257 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
9258 {
9259 return 0;
9260 }
9261
9262 static ssize_t shrink_store(struct kmem_cache *s,
9263 const char *buf, size_t length)
9264 {
9265 if (buf[0] == '1')
9266 kmem_cache_shrink(s);
9267 else
9268 return -EINVAL;
9269 return length;
9270 }
9271 SLAB_ATTR(shrink);
9272
9273 #ifdef CONFIG_NUMA
9274 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
9275 {
9276 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
9277 }
9278
9279 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
9280 const char *buf, size_t length)
9281 {
9282 unsigned int ratio;
9283 int err;
9284
9285 err = kstrtouint(buf, 10, &ratio);
9286 if (err)
9287 return err;
9288 if (ratio > 100)
9289 return -ERANGE;
9290
9291 s->remote_node_defrag_ratio = ratio * 10;
9292
9293 return length;
9294 }
9295 SLAB_ATTR(remote_node_defrag_ratio);
9296 #endif
9297
9298 #ifdef CONFIG_SLUB_STATS
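/*
 * Sum one per-cpu statistics counter across the online CPUs and emit the
 * total, followed by the non-zero " C<cpu>=<count>" entries on SMP.
 */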
9299 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
9300 {
9301 unsigned long sum = 0;
9302 int cpu;
9303 int len = 0;
9304 int *data = kmalloc_objs(int, nr_cpu_ids);
9305
9306 if (!data)
9307 return -ENOMEM;
9308
9309 for_each_online_cpu(cpu) {
9310 unsigned int x = per_cpu_ptr(s->cpu_stats, cpu)->stat[si];
9311
9312 data[cpu] = x;
9313 sum += x;
9314 }
9315
9316 len += sysfs_emit_at(buf, len, "%lu", sum);
9317
9318 #ifdef CONFIG_SMP
9319 for_each_online_cpu(cpu) {
9320 if (data[cpu])
9321 len += sysfs_emit_at(buf, len, " C%d=%u",
9322 cpu, data[cpu]);
9323 }
9324 #endif
9325 kfree(data);
9326 len += sysfs_emit_at(buf, len, "\n");
9327
9328 return len;
9329 }
9330
9331 static void clear_stat(struct kmem_cache *s, enum stat_item si)
9332 {
9333 int cpu;
9334
9335 for_each_online_cpu(cpu)
9336 per_cpu_ptr(s->cpu_stats, cpu)->stat[si] = 0;
9337 }
9338
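/*
 * STAT_ATTR() generates a <text>_show()/<text>_store() pair for one
 * stat_item; writing '0' to the sysfs file clears the counter on all CPUs.
 */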
9339 #define STAT_ATTR(si, text) \
9340 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
9341 { \
9342 return show_stat(s, buf, si); \
9343 } \
9344 static ssize_t text##_store(struct kmem_cache *s, \
9345 const char *buf, size_t length) \
9346 { \
9347 if (buf[0] != '0') \
9348 return -EINVAL; \
9349 clear_stat(s, si); \
9350 return length; \
9351 } \
9352 SLAB_ATTR(text);
9353
9354 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
9355 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
9356 STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf);
9357 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
9358 STAT_ATTR(FREE_FASTPATH, free_fastpath);
9359 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
9360 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
9361 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
9362 STAT_ATTR(ALLOC_SLAB, alloc_slab);
9363 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
9364 STAT_ATTR(FREE_SLAB, free_slab);
9365 STAT_ATTR(ORDER_FALLBACK, order_fallback);
9366 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
9367 STAT_ATTR(SHEAF_FLUSH, sheaf_flush);
9368 STAT_ATTR(SHEAF_REFILL, sheaf_refill);
9369 STAT_ATTR(SHEAF_ALLOC, sheaf_alloc);
9370 STAT_ATTR(SHEAF_FREE, sheaf_free);
9371 STAT_ATTR(BARN_GET, barn_get);
9372 STAT_ATTR(BARN_GET_FAIL, barn_get_fail);
9373 STAT_ATTR(BARN_PUT, barn_put);
9374 STAT_ATTR(BARN_PUT_FAIL, barn_put_fail);
9375 STAT_ATTR(SHEAF_PREFILL_FAST, sheaf_prefill_fast);
9376 STAT_ATTR(SHEAF_PREFILL_SLOW, sheaf_prefill_slow);
9377 STAT_ATTR(SHEAF_PREFILL_OVERSIZE, sheaf_prefill_oversize);
9378 STAT_ATTR(SHEAF_RETURN_FAST, sheaf_return_fast);
9379 STAT_ATTR(SHEAF_RETURN_SLOW, sheaf_return_slow);
9380 #endif /* CONFIG_SLUB_STATS */
9381
9382 #ifdef CONFIG_KFENCE
9383 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
9384 {
9385 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
9386 }
9387
9388 static ssize_t skip_kfence_store(struct kmem_cache *s,
9389 const char *buf, size_t length)
9390 {
9391 int ret = length;
9392
9393 if (buf[0] == '0')
9394 s->flags &= ~SLAB_SKIP_KFENCE;
9395 else if (buf[0] == '1')
9396 s->flags |= SLAB_SKIP_KFENCE;
9397 else
9398 ret = -EINVAL;
9399
9400 return ret;
9401 }
9402 SLAB_ATTR(skip_kfence);
9403 #endif
9404
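/*
 * Attributes exposed for each cache under /sys/kernel/slab/<cache>/.
 */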
9405 static const struct attribute *const slab_attrs[] = {
9406 &slab_size_attr.attr,
9407 &object_size_attr.attr,
9408 &objs_per_slab_attr.attr,
9409 &order_attr.attr,
9410 &sheaf_capacity_attr.attr,
9411 &min_partial_attr.attr,
9412 &cpu_partial_attr.attr,
9413 &objects_partial_attr.attr,
9414 &partial_attr.attr,
9415 &cpu_slabs_attr.attr,
9416 &ctor_attr.attr,
9417 &aliases_attr.attr,
9418 &align_attr.attr,
9419 &hwcache_align_attr.attr,
9420 &reclaim_account_attr.attr,
9421 &destroy_by_rcu_attr.attr,
9422 &shrink_attr.attr,
9423 &slabs_cpu_partial_attr.attr,
9424 #ifdef CONFIG_SLUB_DEBUG
9425 &total_objects_attr.attr,
9426 &objects_attr.attr,
9427 &slabs_attr.attr,
9428 &sanity_checks_attr.attr,
9429 &trace_attr.attr,
9430 &red_zone_attr.attr,
9431 &poison_attr.attr,
9432 &store_user_attr.attr,
9433 &validate_attr.attr,
9434 #endif
9435 #ifdef CONFIG_ZONE_DMA
9436 &cache_dma_attr.attr,
9437 #endif
9438 #ifdef CONFIG_NUMA
9439 &remote_node_defrag_ratio_attr.attr,
9440 #endif
9441 #ifdef CONFIG_SLUB_STATS
9442 &alloc_fastpath_attr.attr,
9443 &alloc_slowpath_attr.attr,
9444 &free_rcu_sheaf_attr.attr,
9445 &free_rcu_sheaf_fail_attr.attr,
9446 &free_fastpath_attr.attr,
9447 &free_slowpath_attr.attr,
9448 &free_add_partial_attr.attr,
9449 &free_remove_partial_attr.attr,
9450 &alloc_slab_attr.attr,
9451 &alloc_node_mismatch_attr.attr,
9452 &free_slab_attr.attr,
9453 &order_fallback_attr.attr,
9454 &cmpxchg_double_fail_attr.attr,
9455 &sheaf_flush_attr.attr,
9456 &sheaf_refill_attr.attr,
9457 &sheaf_alloc_attr.attr,
9458 &sheaf_free_attr.attr,
9459 &barn_get_attr.attr,
9460 &barn_get_fail_attr.attr,
9461 &barn_put_attr.attr,
9462 &barn_put_fail_attr.attr,
9463 &sheaf_prefill_fast_attr.attr,
9464 &sheaf_prefill_slow_attr.attr,
9465 &sheaf_prefill_oversize_attr.attr,
9466 &sheaf_return_fast_attr.attr,
9467 &sheaf_return_slow_attr.attr,
9468 #endif
9469 #ifdef CONFIG_FAILSLAB
9470 &failslab_attr.attr,
9471 #endif
9472 #ifdef CONFIG_HARDENED_USERCOPY
9473 &usersize_attr.attr,
9474 #endif
9475 #ifdef CONFIG_KFENCE
9476 &skip_kfence_attr.attr,
9477 #endif
9478
9479 NULL
9480 };
9481
9482 ATTRIBUTE_GROUPS(slab);
9483
9484 static ssize_t slab_attr_show(struct kobject *kobj,
9485 struct attribute *attr,
9486 char *buf)
9487 {
9488 const struct slab_attribute *attribute;
9489 struct kmem_cache *s;
9490
9491 attribute = to_slab_attr(attr);
9492 s = to_slab(kobj);
9493
9494 if (!attribute->show)
9495 return -EIO;
9496
9497 return attribute->show(s, buf);
9498 }
9499
9500 static ssize_t slab_attr_store(struct kobject *kobj,
9501 struct attribute *attr,
9502 const char *buf, size_t len)
9503 {
9504 const struct slab_attribute *attribute;
9505 struct kmem_cache *s;
9506
9507 attribute = to_slab_attr(attr);
9508 s = to_slab(kobj);
9509
9510 if (!attribute->store)
9511 return -EIO;
9512
9513 return attribute->store(s, buf, len);
9514 }
9515
9516 static void kmem_cache_release(struct kobject *k)
9517 {
9518 slab_kmem_cache_release(to_slab(k));
9519 }
9520
9521 static const struct sysfs_ops slab_sysfs_ops = {
9522 .show = slab_attr_show,
9523 .store = slab_attr_store,
9524 };
9525
9526 static const struct kobj_type slab_ktype = {
9527 .sysfs_ops = &slab_sysfs_ops,
9528 .release = kmem_cache_release,
9529 .default_groups = slab_groups,
9530 };
9531
9532 static struct kset *slab_kset;
9533
9534 static inline struct kset *cache_kset(struct kmem_cache *s)
9535 {
9536 return slab_kset;
9537 }
9538
9539 #define ID_STR_LENGTH 32
9540
9541 /* Create a unique string id for a slab cache:
9542 *
9543 * Format :[flags-]size, e.g. ":A-0000192" for a SLAB_ACCOUNT
9543 * cache whose size is 192 bytes.
9544 */
9545 static char *create_unique_id(struct kmem_cache *s)
9546 {
9547 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
9548 char *p = name;
9549
9550 if (!name)
9551 return ERR_PTR(-ENOMEM);
9552
9553 *p++ = ':';
9554 /*
9555 * First flags affecting slabcache operations. We will only
9556 * get here for aliasable slabs so we do not need to support
9557 * too many flags. The flags here must cover all flags that
9558 * are matched during merging to guarantee that the id is
9559 * unique.
9560 */
9561 if (s->flags & SLAB_CACHE_DMA)
9562 *p++ = 'd';
9563 if (s->flags & SLAB_CACHE_DMA32)
9564 *p++ = 'D';
9565 if (s->flags & SLAB_RECLAIM_ACCOUNT)
9566 *p++ = 'a';
9567 if (s->flags & SLAB_CONSISTENCY_CHECKS)
9568 *p++ = 'F';
9569 if (s->flags & SLAB_ACCOUNT)
9570 *p++ = 'A';
9571 if (p != name + 1)
9572 *p++ = '-';
9573 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
9574
9575 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
9576 kfree(name);
9577 return ERR_PTR(-EINVAL);
9578 }
9579 kmsan_unpoison_memory(name, p - name);
9580 return name;
9581 }
9582
9583 static int sysfs_slab_add(struct kmem_cache *s)
9584 {
9585 int err;
9586 const char *name;
9587 struct kset *kset = cache_kset(s);
9588 int unmergeable = slab_unmergeable(s);
9589
9590 if (!unmergeable && disable_higher_order_debug &&
9591 (slub_debug & DEBUG_METADATA_FLAGS))
9592 unmergeable = 1;
9593
9594 if (unmergeable) {
9595 /*
9596 * This slab cache can never be merged, so we can use its name
9597 * directly. That is typically the case for debug configurations,
9598 * and it lets us catch duplicate names easily.
9599 */
9600 sysfs_remove_link(&slab_kset->kobj, s->name);
9601 name = s->name;
9602 } else {
9603 /*
9604 * Create a unique name for the slab as a target
9605 * for the symlinks.
9606 */
9607 name = create_unique_id(s);
9608 if (IS_ERR(name))
9609 return PTR_ERR(name);
9610 }
9611
9612 s->kobj.kset = kset;
9613 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
9614 if (err)
9615 goto out;
9616
9617 if (!unmergeable) {
9618 /* Setup first alias */
9619 sysfs_slab_alias(s, s->name);
9620 }
9621 out:
9622 if (!unmergeable)
9623 kfree(name);
9624 return err;
9625 }
9626
9627 void sysfs_slab_unlink(struct kmem_cache *s)
9628 {
9629 if (s->kobj.state_in_sysfs)
9630 kobject_del(&s->kobj);
9631 }
9632
9633 void sysfs_slab_release(struct kmem_cache *s)
9634 {
9635 kobject_put(&s->kobj);
9636 }
9637
9638 /*
9639 * Need to buffer aliases during bootup until sysfs becomes
9640 * available lest we lose that information.
9641 */
9642 struct saved_alias {
9643 struct kmem_cache *s;
9644 const char *name;
9645 struct saved_alias *next;
9646 };
9647
9648 static struct saved_alias *alias_list;
9649
9650 int sysfs_slab_alias(struct kmem_cache *s, const char *name)
9651 {
9652 struct saved_alias *al;
9653
9654 if (slab_state == FULL) {
9655 /*
9656 * If we have a leftover link then remove it.
9657 */
9658 sysfs_remove_link(&slab_kset->kobj, name);
9659 /*
9660 * The original cache may have failed to create its sysfs file.
9661 * In that case, sysfs_create_link() returns -ENOENT and the
9662 * symbolic link creation is skipped.
9663 */
9664 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
9665 }
9666
9667 al = kmalloc_obj(struct saved_alias);
9668 if (!al)
9669 return -ENOMEM;
9670
9671 al->s = s;
9672 al->name = name;
9673 al->next = alias_list;
9674 alias_list = al;
9675 kmsan_unpoison_memory(al, sizeof(*al));
9676 return 0;
9677 }
9678
9679 static int __init slab_sysfs_init(void)
9680 {
9681 struct kmem_cache *s;
9682 int err;
9683
9684 mutex_lock(&slab_mutex);
9685
9686 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
9687 if (!slab_kset) {
9688 mutex_unlock(&slab_mutex);
9689 pr_err("Cannot register slab subsystem.\n");
9690 return -ENOMEM;
9691 }
9692
9693 slab_state = FULL;
9694
9695 list_for_each_entry(s, &slab_caches, list) {
9696 err = sysfs_slab_add(s);
9697 if (err)
9698 pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
9699 s->name);
9700 }
9701
9702 while (alias_list) {
9703 struct saved_alias *al = alias_list;
9704
9705 alias_list = alias_list->next;
9706 err = sysfs_slab_alias(al->s, al->name);
9707 if (err)
9708 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
9709 al->name);
9710 kfree(al);
9711 }
9712
9713 mutex_unlock(&slab_mutex);
9714 return 0;
9715 }
9716 late_initcall(slab_sysfs_init);
9717 #endif /* SLAB_SUPPORTS_SYSFS */
9718
9719 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
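/*
 * Implementation of the debugfs alloc_traces/free_traces files: a seq_file
 * that prints one tracked allocation/free location per entry.
 */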
9720 static int slab_debugfs_show(struct seq_file *seq, void *v)
9721 {
9722 struct loc_track *t = seq->private;
9723 struct location *l;
9724 unsigned long idx;
9725
9726 idx = (unsigned long) t->idx;
9727 if (idx < t->count) {
9728 l = &t->loc[idx];
9729
9730 seq_printf(seq, "%7ld ", l->count);
9731
9732 if (l->addr)
9733 seq_printf(seq, "%pS", (void *)l->addr);
9734 else
9735 seq_puts(seq, "<not-available>");
9736
9737 if (l->waste)
9738 seq_printf(seq, " waste=%lu/%lu",
9739 l->count * l->waste, l->waste);
9740
9741 if (l->sum_time != l->min_time) {
9742 seq_printf(seq, " age=%ld/%llu/%ld",
9743 l->min_time, div_u64(l->sum_time, l->count),
9744 l->max_time);
9745 } else
9746 seq_printf(seq, " age=%ld", l->min_time);
9747
9748 if (l->min_pid != l->max_pid)
9749 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
9750 else
9751 seq_printf(seq, " pid=%ld",
9752 l->min_pid);
9753
9754 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
9755 seq_printf(seq, " cpus=%*pbl",
9756 cpumask_pr_args(to_cpumask(l->cpus)));
9757
9758 if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
9759 seq_printf(seq, " nodes=%*pbl",
9760 nodemask_pr_args(&l->nodes));
9761
9762 #ifdef CONFIG_STACKDEPOT
9763 {
9764 depot_stack_handle_t handle;
9765 unsigned long *entries;
9766 unsigned int nr_entries, j;
9767
9768 handle = READ_ONCE(l->handle);
9769 if (handle) {
9770 nr_entries = stack_depot_fetch(handle, &entries);
9771 seq_puts(seq, "\n");
9772 for (j = 0; j < nr_entries; j++)
9773 seq_printf(seq, " %pS\n", (void *)entries[j]);
9774 }
9775 }
9776 #endif
9777 seq_puts(seq, "\n");
9778 }
9779
9780 if (!idx && !t->count)
9781 seq_puts(seq, "No data\n");
9782
9783 return 0;
9784 }
9785
9786 static void slab_debugfs_stop(struct seq_file *seq, void *v)
9787 {
9788 }
9789
9790 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
9791 {
9792 struct loc_track *t = seq->private;
9793
9794 t->idx = ++(*ppos);
9795 if (*ppos <= t->count)
9796 return ppos;
9797
9798 return NULL;
9799 }
9800
9801 static int cmp_loc_by_count(const void *a, const void *b)
9802 {
9803 struct location *loc1 = (struct location *)a;
9804 struct location *loc2 = (struct location *)b;
9805
9806 return cmp_int(loc2->count, loc1->count);
9807 }
9808
9809 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
9810 {
9811 struct loc_track *t = seq->private;
9812
9813 t->idx = *ppos;
9814 return ppos;
9815 }
9816
9817 static const struct seq_operations slab_debugfs_sops = {
9818 .start = slab_debugfs_start,
9819 .next = slab_debugfs_next,
9820 .stop = slab_debugfs_stop,
9821 .show = slab_debugfs_show,
9822 };
9823
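/*
 * Collect the tracked locations from all partial and full slabs of the cache
 * (under each node's list_lock) and sort them by count for the seq_file.
 */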
9824 static int slab_debug_trace_open(struct inode *inode, struct file *filep)
9825 {
9826
9827 struct kmem_cache_node *n;
9828 enum track_item alloc;
9829 int node;
9830 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
9831 sizeof(struct loc_track));
9832 struct kmem_cache *s = file_inode(filep)->i_private;
9833 unsigned long *obj_map;
9834
9835 if (!t)
9836 return -ENOMEM;
9837
9838 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
9839 if (!obj_map) {
9840 seq_release_private(inode, filep);
9841 return -ENOMEM;
9842 }
9843
9844 alloc = debugfs_get_aux_num(filep);
9845
9846 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
9847 bitmap_free(obj_map);
9848 seq_release_private(inode, filep);
9849 return -ENOMEM;
9850 }
9851
9852 for_each_kmem_cache_node(s, node, n) {
9853 unsigned long flags;
9854 struct slab *slab;
9855
9856 if (!node_nr_slabs(n))
9857 continue;
9858
9859 spin_lock_irqsave(&n->list_lock, flags);
9860 list_for_each_entry(slab, &n->partial, slab_list)
9861 process_slab(t, s, slab, alloc, obj_map);
9862 list_for_each_entry(slab, &n->full, slab_list)
9863 process_slab(t, s, slab, alloc, obj_map);
9864 spin_unlock_irqrestore(&n->list_lock, flags);
9865 }
9866
9867 /* Sort locations by count */
9868 sort(t->loc, t->count, sizeof(struct location),
9869 cmp_loc_by_count, NULL);
9870
9871 bitmap_free(obj_map);
9872 return 0;
9873 }
9874
9875 static int slab_debug_trace_release(struct inode *inode, struct file *file)
9876 {
9877 struct seq_file *seq = file->private_data;
9878 struct loc_track *t = seq->private;
9879
9880 free_loc_track(t);
9881 return seq_release_private(inode, file);
9882 }
9883
9884 static const struct file_operations slab_debugfs_fops = {
9885 .open = slab_debug_trace_open,
9886 .read = seq_read,
9887 .llseek = seq_lseek,
9888 .release = slab_debug_trace_release,
9889 };
9890
9891 static void debugfs_slab_add(struct kmem_cache *s)
9892 {
9893 struct dentry *slab_cache_dir;
9894
9895 if (unlikely(!slab_debugfs_root))
9896 return;
9897
9898 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
9899
9900 debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s,
9901 TRACK_ALLOC, &slab_debugfs_fops);
9902
9903 debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s,
9904 TRACK_FREE, &slab_debugfs_fops);
9905 }
9906
9907 void debugfs_slab_release(struct kmem_cache *s)
9908 {
9909 debugfs_lookup_and_remove(s->name, slab_debugfs_root);
9910 }
9911
9912 static int __init slab_debugfs_init(void)
9913 {
9914 struct kmem_cache *s;
9915
9916 slab_debugfs_root = debugfs_create_dir("slab", NULL);
9917
9918 list_for_each_entry(s, &slab_caches, list)
9919 if (s->flags & SLAB_STORE_USER)
9920 debugfs_slab_add(s);
9921
9922 return 0;
9923
9924 }
9925 __initcall(slab_debugfs_init);
9926 #endif
9927 /*
9928 * The /proc/slabinfo ABI
9929 */
9930 #ifdef CONFIG_SLUB_DEBUG
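/*
 * Note that the free object count comes from count_partial_free_approx(),
 * so the reported active_objs value is an approximation.
 */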
9931 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
9932 {
9933 unsigned long nr_slabs = 0;
9934 unsigned long nr_objs = 0;
9935 unsigned long nr_free = 0;
9936 int node;
9937 struct kmem_cache_node *n;
9938
9939 for_each_kmem_cache_node(s, node, n) {
9940 nr_slabs += node_nr_slabs(n);
9941 nr_objs += node_nr_objs(n);
9942 nr_free += count_partial_free_approx(n);
9943 }
9944
9945 sinfo->active_objs = nr_objs - nr_free;
9946 sinfo->num_objs = nr_objs;
9947 sinfo->active_slabs = nr_slabs;
9948 sinfo->num_slabs = nr_slabs;
9949 sinfo->objects_per_slab = oo_objects(s->oo);
9950 sinfo->cache_order = oo_order(s->oo);
9951 }
9952 #endif /* CONFIG_SLUB_DEBUG */
9953