xref: /linux/mm/slub.c (revision 9702969978695d9a699a1f34771580cdbb153b33)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SLUB: A slab allocator with low overhead percpu array caches and mostly
4  * lockless freeing of objects to slabs in the slowpath.
5  *
6  * The allocator synchronizes using spin_trylock for percpu arrays in the
7  * fastpath, and cmpxchg_double (or bit spinlock) for slowpath freeing.
8  * Uses a centralized lock to manage a pool of partial slabs.
9  *
10  * (C) 2007 SGI, Christoph Lameter
11  * (C) 2011 Linux Foundation, Christoph Lameter
12  * (C) 2025 SUSE, Vlastimil Babka
13  */
14 
15 #include <linux/mm.h>
16 #include <linux/swap.h> /* mm_account_reclaimed_pages() */
17 #include <linux/module.h>
18 #include <linux/bit_spinlock.h>
19 #include <linux/interrupt.h>
20 #include <linux/swab.h>
21 #include <linux/bitops.h>
22 #include <linux/slab.h>
23 #include "slab.h"
24 #include <linux/vmalloc.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kasan.h>
28 #include <linux/node.h>
29 #include <linux/kmsan.h>
30 #include <linux/cpu.h>
31 #include <linux/cpuset.h>
32 #include <linux/mempolicy.h>
33 #include <linux/ctype.h>
34 #include <linux/stackdepot.h>
35 #include <linux/debugobjects.h>
36 #include <linux/kallsyms.h>
37 #include <linux/kfence.h>
38 #include <linux/memory.h>
39 #include <linux/math64.h>
40 #include <linux/fault-inject.h>
41 #include <linux/kmemleak.h>
42 #include <linux/stacktrace.h>
43 #include <linux/prefetch.h>
44 #include <linux/memcontrol.h>
45 #include <linux/random.h>
46 #include <linux/prandom.h>
47 #include <kunit/test.h>
48 #include <kunit/test-bug.h>
49 #include <linux/sort.h>
50 #include <linux/irq_work.h>
51 #include <linux/kprobes.h>
52 #include <linux/debugfs.h>
53 #include <trace/events/kmem.h>
54 
55 #include "internal.h"
56 
57 /*
58  * Lock order:
59  *   0.  cpu_hotplug_lock
60  *   1.  slab_mutex (Global Mutex)
61  *   2a. kmem_cache->cpu_sheaves->lock (Local trylock)
62  *   2b. node->barn->lock (Spinlock)
63  *   2c. node->list_lock (Spinlock)
64  *   3.  slab_lock(slab) (Only on some arches)
65  *   4.  object_map_lock (Only for debugging)
66  *
67  *   slab_mutex
68  *
69  *   The role of the slab_mutex is to protect the list of all the slabs
70  *   and to synchronize major metadata changes to slab cache structures.
71  *   Also synchronizes memory hotplug callbacks.
72  *
73  *   slab_lock
74  *
75  *   The slab_lock is a wrapper around the page lock, thus it is a bit
76  *   spinlock.
77  *
78  *   The slab_lock is only used on arches that do not have the ability
79  *   to do a cmpxchg_double. It only protects:
80  *
81  *	A. slab->freelist	-> List of free objects in a slab
82  *	B. slab->inuse		-> Number of objects in use
83  *	C. slab->objects	-> Number of objects in slab
84  *	D. slab->frozen		-> frozen state
85  *
86  *   SL_partial slabs
87  *
88  *   Slabs on node partial list have at least one free object. A limited number
89  *   of slabs on the list can be fully free (slab->inuse == 0), until we start
90  *   discarding them. These slabs are marked with SL_partial, and the flag is
91  *   cleared while removing them, usually to grab their freelist afterwards.
92  *   This clearing also exempts them from list management. Please see
93  *   __slab_free() for more details.
94  *
95  *   Full slabs
96  *
97  *   For caches without debugging enabled, full slabs (slab->inuse ==
98  *   slab->objects and slab->freelist == NULL) are not placed on any list.
99  *   The __slab_free() freeing the first object from such a slab will place
100  *   it on the partial list. Caches with debugging enabled place such slab
101  *   on the full list and use different allocation and freeing paths.
102  *
103  *   Frozen slabs
104  *
105  *   If a slab is frozen then it is exempt from list management. It is used to
106  *   indicate a slab that has failed consistency checks and thus cannot be
107  *   allocated from anymore - it is also marked as full. Any previously
108  *   allocated objects will be simply leaked upon freeing instead of attempting
109  *   to modify the potentially corrupted freelist and metadata.
110  *
111  *   To sum up, the current scheme is:
112  *   - node partial slab:            SL_partial && !full && !frozen
113  *   - taken off partial list:      !SL_partial && !full && !frozen
114  *   - full slab, not on any list:  !SL_partial &&  full && !frozen
115  *   - frozen due to inconsistency: !SL_partial &&  full &&  frozen
116  *
117  *   node->list_lock (spinlock)
118  *
119  *   The list_lock protects the partial and full list on each node and
120  *   the partial slab counter. If taken then no new slabs may be added or
121  *   removed from the lists nor make the number of partial slabs be modified.
122  *   (Note that the total number of slabs is an atomic value that may be
123  *   modified without taking the list lock).
124  *
125  *   The list_lock is a centralized lock and thus we avoid taking it as
126  *   much as possible. As long as SLUB does not have to handle partial
127  *   slabs, operations can continue without any centralized lock.
128  *
129  *   For debug caches, all allocations are forced to go through a list_lock
130  *   protected region to serialize against concurrent validation.
131  *
132  *   cpu_sheaves->lock (local_trylock)
133  *
134  *   This lock protects fastpath operations on the percpu sheaves. On !RT it
135  *   only disables preemption and does no atomic operations. As long as the main
136  *   or spare sheaf can handle the allocation or free, there is no other
137  *   overhead.
138  *
139  *   node->barn->lock (spinlock)
140  *
141  *   This lock protects the operations on per-NUMA-node barn. It can quickly
142  *   serve an empty or full sheaf if available, and avoid more expensive refill
143  *   or flush operation.
144  *
145  *   Lockless freeing
146  *
147  *   Objects may have to be freed to their slabs when they are from a remote
148  *   node (where we want to avoid filling local sheaves with remote objects)
149  *   or when there are too many full sheaves. On architectures supporting
150  *   cmpxchg_double this is done by a lockless update of slab's freelist and
151  *   counters, otherwise slab_lock is taken. This only needs to take the
152  *   list_lock if it's a first free to a full slab, or when a slab becomes empty
153  *   after the free.
154  *
155  *   irq, preemption, migration considerations
156  *
157  *   Interrupts are disabled as part of list_lock or barn lock operations, or
158  *   around the slab_lock operation, in order to make the slab allocator safe
159  *   to use in the context of an irq.
160  *   Preemption is disabled as part of local_trylock operations.
161  *   kmalloc_nolock() and kfree_nolock() are safe in NMI context but see
162  *   their limitations.
163  *
164  * SLUB assigns two object arrays called sheaves for caching allocations and
165  * frees on each cpu, with a NUMA node shared barn for balancing between cpus.
166  * Allocations and frees are primarily served from these sheaves.
167  *
168  * Slabs with free elements are kept on a partial list and during regular
169  * operations no list for full slabs is used. If an object in a full slab is
170  * freed then the slab will show up again on the partial lists.
171  * We track full slabs for debugging purposes though because otherwise we
172  * cannot scan all objects.
173  *
174  * Slabs are freed when they become empty. Teardown and setup is minimal so we
175  * rely on the page allocators per cpu caches for fast frees and allocs.
176  *
177  * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
178  * 			options set. This moves	slab handling out of
179  * 			the fast path and disables lockless freelists.
180  */
181 
182 /**
183  * enum slab_flags - How the slab flags bits are used.
184  * @SL_locked: Is locked with slab_lock()
185  * @SL_partial: On the per-node partial list
186  * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
187  *
188  * The slab flags share space with the page flags but some bits have
189  * different interpretations.  The high bits are used for information
190  * like zone/node/section.
191  */
192 enum slab_flags {
193 	SL_locked = PG_locked,
194 	SL_partial = PG_workingset,	/* Historical reasons for this bit */
195 	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
196 };
197 
198 #ifndef CONFIG_SLUB_TINY
199 #define __fastpath_inline __always_inline
200 #else
201 #define __fastpath_inline
202 #endif
203 
204 #ifdef CONFIG_SLUB_DEBUG
205 #ifdef CONFIG_SLUB_DEBUG_ON
206 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
207 #else
208 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
209 #endif
210 #endif		/* CONFIG_SLUB_DEBUG */
211 
212 #ifdef CONFIG_NUMA
213 static DEFINE_STATIC_KEY_FALSE(strict_numa);
214 #endif
215 
216 /* Structure holding parameters for get_from_partial() call chain */
217 struct partial_context {
218 	gfp_t flags;
219 	unsigned int orig_size;
220 };
221 
222 /* Structure holding parameters for get_partial_node_bulk() */
223 struct partial_bulk_context {
224 	gfp_t flags;
225 	unsigned int min_objects;
226 	unsigned int max_objects;
227 	struct list_head slabs;
228 };
229 
230 static inline bool kmem_cache_debug(struct kmem_cache *s)
231 {
232 	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
233 }
234 
235 void *fixup_red_left(struct kmem_cache *s, void *p)
236 {
237 	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
238 		p += s->red_left_pad;
239 
240 	return p;
241 }
242 
243 /*
244  * Issues still to be resolved:
245  *
246  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
247  *
248  * - Variable sizing of the per node arrays
249  */
250 
251 /* Enable to log cmpxchg failures */
252 #undef SLUB_DEBUG_CMPXCHG
253 
254 #ifndef CONFIG_SLUB_TINY
255 /*
256  * Minimum number of partial slabs. These will be left on the partial
257  * lists even if they are empty. kmem_cache_shrink may reclaim them.
258  */
259 #define MIN_PARTIAL 5
260 
261 /*
262  * Maximum number of desirable partial slabs.
263  * The existence of more partial slabs makes kmem_cache_shrink
264  * sort the partial list by the number of objects in use.
265  */
266 #define MAX_PARTIAL 10
267 #else
268 #define MIN_PARTIAL 0
269 #define MAX_PARTIAL 0
270 #endif
271 
272 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
273 				SLAB_POISON | SLAB_STORE_USER)
274 
275 /*
276  * These debug flags cannot use CMPXCHG because there might be consistency
277  * issues when checking or reading debug information
278  */
279 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
280 				SLAB_TRACE)
281 
282 
283 /*
284  * Debugging flags that require metadata to be stored in the slab.  These get
285  * disabled when slab_debug=O is used and a cache's min order increases with
286  * metadata.
287  */
288 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
289 
290 #define OO_SHIFT	16
291 #define OO_MASK		((1 << OO_SHIFT) - 1)
292 #define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */
293 
294 /* Internal SLUB flags */
295 /* Poison object */
296 #define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
297 /* Use cmpxchg_double */
298 
299 #ifdef system_has_freelist_aba
300 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
301 #else
302 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
303 #endif
304 
305 /*
306  * Tracking user of a slab.
307  */
308 #define TRACK_ADDRS_COUNT 16
309 struct track {
310 	unsigned long addr;	/* Called from address */
311 #ifdef CONFIG_STACKDEPOT
312 	depot_stack_handle_t handle;
313 #endif
314 	int cpu;		/* Was running on cpu */
315 	int pid;		/* Pid context */
316 	unsigned long when;	/* When did the operation occur */
317 };
318 
319 enum track_item { TRACK_ALLOC, TRACK_FREE };
320 
321 #ifdef SLAB_SUPPORTS_SYSFS
322 static int sysfs_slab_add(struct kmem_cache *);
323 #else
324 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
325 #endif
326 
327 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
328 static void debugfs_slab_add(struct kmem_cache *);
329 #else
330 static inline void debugfs_slab_add(struct kmem_cache *s) { }
331 #endif
332 
333 enum add_mode {
334 	ADD_TO_HEAD,
335 	ADD_TO_TAIL,
336 };
337 
338 enum stat_item {
339 	ALLOC_FASTPATH,		/* Allocation from percpu sheaves */
340 	ALLOC_SLOWPATH,		/* Allocation from partial or new slab */
341 	FREE_RCU_SHEAF,		/* Free to rcu_free sheaf */
342 	FREE_RCU_SHEAF_FAIL,	/* Failed to free to a rcu_free sheaf */
343 	FREE_FASTPATH,		/* Free to percpu sheaves */
344 	FREE_SLOWPATH,		/* Free to a slab */
345 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
346 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
347 	ALLOC_SLAB,		/* New slab acquired from page allocator */
348 	ALLOC_NODE_MISMATCH,	/* Requested node different from cpu sheaf */
349 	FREE_SLAB,		/* Slab freed to the page allocator */
350 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
351 	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
352 	SHEAF_FLUSH,		/* Objects flushed from a sheaf */
353 	SHEAF_REFILL,		/* Objects refilled to a sheaf */
354 	SHEAF_ALLOC,		/* Allocation of an empty sheaf */
355 	SHEAF_FREE,		/* Freeing of an empty sheaf */
356 	BARN_GET,		/* Got full sheaf from barn */
357 	BARN_GET_FAIL,		/* Failed to get full sheaf from barn */
358 	BARN_PUT,		/* Put full sheaf to barn */
359 	BARN_PUT_FAIL,		/* Failed to put full sheaf to barn */
360 	SHEAF_PREFILL_FAST,	/* Sheaf prefill grabbed the spare sheaf */
361 	SHEAF_PREFILL_SLOW,	/* Sheaf prefill found no spare sheaf */
362 	SHEAF_PREFILL_OVERSIZE,	/* Allocation of oversize sheaf for prefill */
363 	SHEAF_RETURN_FAST,	/* Sheaf return reattached spare sheaf */
364 	SHEAF_RETURN_SLOW,	/* Sheaf return could not reattach spare */
365 	NR_SLUB_STAT_ITEMS
366 };
367 
368 #ifdef CONFIG_SLUB_STATS
369 struct kmem_cache_stats {
370 	unsigned int stat[NR_SLUB_STAT_ITEMS];
371 };
372 #endif
373 
374 static inline void stat(const struct kmem_cache *s, enum stat_item si)
375 {
376 #ifdef CONFIG_SLUB_STATS
377 	/*
378 	 * The rmw is racy on a preemptible kernel but this is acceptable, so
379 	 * avoid this_cpu_add()'s irq-disable overhead.
380 	 */
381 	raw_cpu_inc(s->cpu_stats->stat[si]);
382 #endif
383 }
384 
385 static inline
386 void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
387 {
388 #ifdef CONFIG_SLUB_STATS
389 	raw_cpu_add(s->cpu_stats->stat[si], v);
390 #endif
391 }
392 
393 #define MAX_FULL_SHEAVES	10
394 #define MAX_EMPTY_SHEAVES	10
395 
396 struct node_barn {
397 	spinlock_t lock;
398 	struct list_head sheaves_full;
399 	struct list_head sheaves_empty;
400 	unsigned int nr_full;
401 	unsigned int nr_empty;
402 };
403 
404 struct slab_sheaf {
405 	union {
406 		struct rcu_head rcu_head;
407 		struct list_head barn_list;
408 		/* only used for prefilled sheafs */
409 		struct {
410 			unsigned int capacity;
411 			bool pfmemalloc;
412 		};
413 	};
414 	struct kmem_cache *cache;
415 	unsigned int size;
416 	int node; /* only used for rcu_sheaf */
417 	void *objects[];
418 };
419 
420 struct slub_percpu_sheaves {
421 	local_trylock_t lock;
422 	struct slab_sheaf *main; /* never NULL when unlocked */
423 	struct slab_sheaf *spare; /* empty or full, may be NULL */
424 	struct slab_sheaf *rcu_free; /* for batching kfree_rcu() */
425 };
426 
427 /*
428  * The slab lists for all objects.
429  */
430 struct kmem_cache_node {
431 	spinlock_t list_lock;
432 	unsigned long nr_partial;
433 	struct list_head partial;
434 #ifdef CONFIG_SLUB_DEBUG
435 	atomic_long_t nr_slabs;
436 	atomic_long_t total_objects;
437 	struct list_head full;
438 #endif
439 	struct node_barn *barn;
440 };
441 
442 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
443 {
444 	return s->node[node];
445 }
446 
447 /*
448  * Get the barn of the current cpu's closest memory node. It may not exist on
449  * systems with memoryless nodes but without CONFIG_HAVE_MEMORYLESS_NODES
450  */
451 static inline struct node_barn *get_barn(struct kmem_cache *s)
452 {
453 	struct kmem_cache_node *n = get_node(s, numa_mem_id());
454 
455 	if (!n)
456 		return NULL;
457 
458 	return n->barn;
459 }
460 
461 /*
462  * Iterator over all nodes. The body will be executed for each node that has
463  * a kmem_cache_node structure allocated (which is true for all online nodes)
464  */
465 #define for_each_kmem_cache_node(__s, __node, __n) \
466 	for (__node = 0; __node < nr_node_ids; __node++) \
467 		 if ((__n = get_node(__s, __node)))
468 
469 /*
470  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
471  * Corresponds to node_state[N_MEMORY], but can temporarily
472  * differ during memory hotplug/hotremove operations.
473  * Protected by slab_mutex.
474  */
475 static nodemask_t slab_nodes;
476 
477 /*
478  * Workqueue used for flushing cpu and kfree_rcu sheaves.
479  */
480 static struct workqueue_struct *flushwq;
481 
482 struct slub_flush_work {
483 	struct work_struct work;
484 	struct kmem_cache *s;
485 	bool skip;
486 };
487 
488 static DEFINE_MUTEX(flush_lock);
489 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
490 
491 /********************************************************************
492  * 			Core slab cache functions
493  *******************************************************************/
494 
495 /*
496  * Returns freelist pointer (ptr). With hardening, this is obfuscated
497  * with an XOR of the address where the pointer is held and a per-cache
498  * random number.
499  */
500 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
501 					    void *ptr, unsigned long ptr_addr)
502 {
503 	unsigned long encoded;
504 
505 #ifdef CONFIG_SLAB_FREELIST_HARDENED
506 	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
507 #else
508 	encoded = (unsigned long)ptr;
509 #endif
510 	return (freeptr_t){.v = encoded};
511 }
512 
513 static inline void *freelist_ptr_decode(const struct kmem_cache *s,
514 					freeptr_t ptr, unsigned long ptr_addr)
515 {
516 	void *decoded;
517 
518 #ifdef CONFIG_SLAB_FREELIST_HARDENED
519 	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
520 #else
521 	decoded = (void *)ptr.v;
522 #endif
523 	return decoded;
524 }
525 
526 static inline void *get_freepointer(struct kmem_cache *s, void *object)
527 {
528 	unsigned long ptr_addr;
529 	freeptr_t p;
530 
531 	object = kasan_reset_tag(object);
532 	ptr_addr = (unsigned long)object + s->offset;
533 	p = *(freeptr_t *)(ptr_addr);
534 	return freelist_ptr_decode(s, p, ptr_addr);
535 }
536 
537 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
538 {
539 	unsigned long freeptr_addr = (unsigned long)object + s->offset;
540 
541 #ifdef CONFIG_SLAB_FREELIST_HARDENED
542 	BUG_ON(object == fp); /* naive detection of double free or corruption */
543 #endif
544 
545 	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
546 	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
547 }
548 
549 /*
550  * See comment in calculate_sizes().
551  */
552 static inline bool freeptr_outside_object(struct kmem_cache *s)
553 {
554 	return s->offset >= s->inuse;
555 }
556 
557 /*
558  * Return offset of the end of info block which is inuse + free pointer if
559  * not overlapping with object.
560  */
561 static inline unsigned int get_info_end(struct kmem_cache *s)
562 {
563 	if (freeptr_outside_object(s))
564 		return s->inuse + sizeof(void *);
565 	else
566 		return s->inuse;
567 }
568 
569 /* Loop over all objects in a slab */
570 #define for_each_object(__p, __s, __addr, __objects) \
571 	for (__p = fixup_red_left(__s, __addr); \
572 		__p < (__addr) + (__objects) * (__s)->size; \
573 		__p += (__s)->size)
574 
575 static inline unsigned int order_objects(unsigned int order, unsigned int size)
576 {
577 	return ((unsigned int)PAGE_SIZE << order) / size;
578 }
579 
580 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
581 		unsigned int size)
582 {
583 	struct kmem_cache_order_objects x = {
584 		(order << OO_SHIFT) + order_objects(order, size)
585 	};
586 
587 	return x;
588 }
589 
590 static inline unsigned int oo_order(struct kmem_cache_order_objects x)
591 {
592 	return x.x >> OO_SHIFT;
593 }
594 
595 static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
596 {
597 	return x.x & OO_MASK;
598 }
599 
600 /*
601  * If network-based swap is enabled, slub must keep track of whether memory
602  * were allocated from pfmemalloc reserves.
603  */
604 static inline bool slab_test_pfmemalloc(const struct slab *slab)
605 {
606 	return test_bit(SL_pfmemalloc, &slab->flags.f);
607 }
608 
609 static inline void slab_set_pfmemalloc(struct slab *slab)
610 {
611 	set_bit(SL_pfmemalloc, &slab->flags.f);
612 }
613 
614 static inline void __slab_clear_pfmemalloc(struct slab *slab)
615 {
616 	__clear_bit(SL_pfmemalloc, &slab->flags.f);
617 }
618 
619 /*
620  * Per slab locking using the pagelock
621  */
622 static __always_inline void slab_lock(struct slab *slab)
623 {
624 	bit_spin_lock(SL_locked, &slab->flags.f);
625 }
626 
627 static __always_inline void slab_unlock(struct slab *slab)
628 {
629 	bit_spin_unlock(SL_locked, &slab->flags.f);
630 }
631 
632 static inline bool
633 __update_freelist_fast(struct slab *slab, struct freelist_counters *old,
634 		       struct freelist_counters *new)
635 {
636 #ifdef system_has_freelist_aba
637 	return try_cmpxchg_freelist(&slab->freelist_counters,
638 				    &old->freelist_counters,
639 				    new->freelist_counters);
640 #else
641 	return false;
642 #endif
643 }
644 
645 static inline bool
646 __update_freelist_slow(struct slab *slab, struct freelist_counters *old,
647 		       struct freelist_counters *new)
648 {
649 	bool ret = false;
650 
651 	slab_lock(slab);
652 	if (slab->freelist == old->freelist &&
653 	    slab->counters == old->counters) {
654 		slab->freelist = new->freelist;
655 		/* prevent tearing for the read in get_partial_node_bulk() */
656 		WRITE_ONCE(slab->counters, new->counters);
657 		ret = true;
658 	}
659 	slab_unlock(slab);
660 
661 	return ret;
662 }
663 
664 /*
665  * Interrupts must be disabled (for the fallback code to work right), typically
666  * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
667  * part of bit_spin_lock(), is sufficient because the policy is not to allow any
668  * allocation/ free operation in hardirq context. Therefore nothing can
669  * interrupt the operation.
670  */
671 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
672 		struct freelist_counters *old, struct freelist_counters *new, const char *n)
673 {
674 	bool ret;
675 
676 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
677 		lockdep_assert_irqs_disabled();
678 
679 	if (s->flags & __CMPXCHG_DOUBLE)
680 		ret = __update_freelist_fast(slab, old, new);
681 	else
682 		ret = __update_freelist_slow(slab, old, new);
683 
684 	if (likely(ret))
685 		return true;
686 
687 	cpu_relax();
688 	stat(s, CMPXCHG_DOUBLE_FAIL);
689 
690 #ifdef SLUB_DEBUG_CMPXCHG
691 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
692 #endif
693 
694 	return false;
695 }
696 
697 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
698 		struct freelist_counters *old, struct freelist_counters *new, const char *n)
699 {
700 	bool ret;
701 
702 	if (s->flags & __CMPXCHG_DOUBLE) {
703 		ret = __update_freelist_fast(slab, old, new);
704 	} else {
705 		unsigned long flags;
706 
707 		local_irq_save(flags);
708 		ret = __update_freelist_slow(slab, old, new);
709 		local_irq_restore(flags);
710 	}
711 	if (likely(ret))
712 		return true;
713 
714 	cpu_relax();
715 	stat(s, CMPXCHG_DOUBLE_FAIL);
716 
717 #ifdef SLUB_DEBUG_CMPXCHG
718 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
719 #endif
720 
721 	return false;
722 }
723 
724 /*
725  * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
726  * family will round up the real request size to these fixed ones, so
727  * there could be an extra area than what is requested. Save the original
728  * request size in the meta data area, for better debug and sanity check.
729  */
730 static inline void set_orig_size(struct kmem_cache *s,
731 				void *object, unsigned long orig_size)
732 {
733 	void *p = kasan_reset_tag(object);
734 
735 	if (!slub_debug_orig_size(s))
736 		return;
737 
738 	p += get_info_end(s);
739 	p += sizeof(struct track) * 2;
740 
741 	*(unsigned long *)p = orig_size;
742 }
743 
744 static inline unsigned long get_orig_size(struct kmem_cache *s, void *object)
745 {
746 	void *p = kasan_reset_tag(object);
747 
748 	if (is_kfence_address(object))
749 		return kfence_ksize(object);
750 
751 	if (!slub_debug_orig_size(s))
752 		return s->object_size;
753 
754 	p += get_info_end(s);
755 	p += sizeof(struct track) * 2;
756 
757 	return *(unsigned long *)p;
758 }
759 
760 #ifdef CONFIG_SLAB_OBJ_EXT
761 
762 /*
763  * Check if memory cgroup or memory allocation profiling is enabled.
764  * If enabled, SLUB tries to reduce memory overhead of accounting
765  * slab objects. If neither is enabled when this function is called,
766  * the optimization is simply skipped to avoid affecting caches that do not
767  * need slabobj_ext metadata.
768  *
769  * However, this may disable optimization when memory cgroup or memory
770  * allocation profiling is used, but slabs are created too early
771  * even before those subsystems are initialized.
772  */
773 static inline bool need_slab_obj_exts(struct kmem_cache *s)
774 {
775 	if (s->flags & SLAB_NO_OBJ_EXT)
776 		return false;
777 
778 	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
779 		return true;
780 
781 	if (mem_alloc_profiling_enabled())
782 		return true;
783 
784 	return false;
785 }
786 
787 static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
788 {
789 	return sizeof(struct slabobj_ext) * slab->objects;
790 }
791 
792 static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
793 						    struct slab *slab)
794 {
795 	unsigned long objext_offset;
796 
797 	objext_offset = s->size * slab->objects;
798 	objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
799 	return objext_offset;
800 }
801 
802 static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
803 						     struct slab *slab)
804 {
805 	unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
806 	unsigned long objext_size = obj_exts_size_in_slab(slab);
807 
808 	return objext_offset + objext_size <= slab_size(slab);
809 }
810 
811 static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
812 {
813 	unsigned long obj_exts;
814 	unsigned long start;
815 	unsigned long end;
816 
817 	obj_exts = slab_obj_exts(slab);
818 	if (!obj_exts)
819 		return false;
820 
821 	start = (unsigned long)slab_address(slab);
822 	end = start + slab_size(slab);
823 	return (obj_exts >= start) && (obj_exts < end);
824 }
825 #else
826 static inline bool need_slab_obj_exts(struct kmem_cache *s)
827 {
828 	return false;
829 }
830 
831 static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
832 {
833 	return 0;
834 }
835 
836 static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
837 						    struct slab *slab)
838 {
839 	return 0;
840 }
841 
842 static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
843 						     struct slab *slab)
844 {
845 	return false;
846 }
847 
848 static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
849 {
850 	return false;
851 }
852 
853 #endif
854 
855 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
856 static bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
857 {
858 	/*
859 	 * Note we cannot rely on the SLAB_OBJ_EXT_IN_OBJ flag here and need to
860 	 * check the stride. A cache can have SLAB_OBJ_EXT_IN_OBJ set, but
861 	 * allocations within_slab_leftover are preferred. And those may be
862 	 * possible or not depending on the particular slab's size.
863 	 */
864 	return obj_exts_in_slab(s, slab) &&
865 	       (slab_get_stride(slab) == s->size);
866 }
867 
868 static unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
869 {
870 	unsigned int offset = get_info_end(s);
871 
872 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
873 		offset += sizeof(struct track) * 2;
874 
875 	if (slub_debug_orig_size(s))
876 		offset += sizeof(unsigned long);
877 
878 	offset += kasan_metadata_size(s, false);
879 
880 	return offset;
881 }
882 #else
883 static inline bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
884 {
885 	return false;
886 }
887 
888 static inline unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
889 {
890 	return 0;
891 }
892 #endif
893 
894 #ifdef CONFIG_SLUB_DEBUG
895 
896 /*
897  * For debugging context when we want to check if the struct slab pointer
898  * appears to be valid.
899  */
900 static inline bool validate_slab_ptr(struct slab *slab)
901 {
902 	return PageSlab(slab_page(slab));
903 }
904 
905 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
906 static DEFINE_SPINLOCK(object_map_lock);
907 
908 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
909 		       struct slab *slab)
910 {
911 	void *addr = slab_address(slab);
912 	void *p;
913 
914 	bitmap_zero(obj_map, slab->objects);
915 
916 	for (p = slab->freelist; p; p = get_freepointer(s, p))
917 		set_bit(__obj_to_index(s, addr, p), obj_map);
918 }
919 
920 #if IS_ENABLED(CONFIG_KUNIT)
921 static bool slab_add_kunit_errors(void)
922 {
923 	struct kunit_resource *resource;
924 
925 	if (!kunit_get_current_test())
926 		return false;
927 
928 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
929 	if (!resource)
930 		return false;
931 
932 	(*(int *)resource->data)++;
933 	kunit_put_resource(resource);
934 	return true;
935 }
936 
937 bool slab_in_kunit_test(void)
938 {
939 	struct kunit_resource *resource;
940 
941 	if (!kunit_get_current_test())
942 		return false;
943 
944 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
945 	if (!resource)
946 		return false;
947 
948 	kunit_put_resource(resource);
949 	return true;
950 }
951 #else
952 static inline bool slab_add_kunit_errors(void) { return false; }
953 #endif
954 
955 static inline unsigned int size_from_object(struct kmem_cache *s)
956 {
957 	if (s->flags & SLAB_RED_ZONE)
958 		return s->size - s->red_left_pad;
959 
960 	return s->size;
961 }
962 
963 static inline void *restore_red_left(struct kmem_cache *s, void *p)
964 {
965 	if (s->flags & SLAB_RED_ZONE)
966 		p -= s->red_left_pad;
967 
968 	return p;
969 }
970 
971 /*
972  * Debug settings:
973  */
974 #if defined(CONFIG_SLUB_DEBUG_ON)
975 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
976 #else
977 static slab_flags_t slub_debug;
978 #endif
979 
980 static const char *slub_debug_string __ro_after_init;
981 static int disable_higher_order_debug;
982 
983 /*
984  * Object debugging
985  */
986 
987 /* Verify that a pointer has an address that is valid within a slab page */
988 static inline int check_valid_pointer(struct kmem_cache *s,
989 				struct slab *slab, void *object)
990 {
991 	void *base;
992 
993 	if (!object)
994 		return 1;
995 
996 	base = slab_address(slab);
997 	object = kasan_reset_tag(object);
998 	object = restore_red_left(s, object);
999 	if (object < base || object >= base + slab->objects * s->size ||
1000 		(object - base) % s->size) {
1001 		return 0;
1002 	}
1003 
1004 	return 1;
1005 }
1006 
1007 static void print_section(char *level, char *text, u8 *addr,
1008 			  unsigned int length)
1009 {
1010 	metadata_access_enable();
1011 	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
1012 			16, 1, kasan_reset_tag((void *)addr), length, 1);
1013 	metadata_access_disable();
1014 }
1015 
1016 static struct track *get_track(struct kmem_cache *s, void *object,
1017 	enum track_item alloc)
1018 {
1019 	struct track *p;
1020 
1021 	p = object + get_info_end(s);
1022 
1023 	return kasan_reset_tag(p + alloc);
1024 }
1025 
1026 #ifdef CONFIG_STACKDEPOT
1027 static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
1028 {
1029 	depot_stack_handle_t handle;
1030 	unsigned long entries[TRACK_ADDRS_COUNT];
1031 	unsigned int nr_entries;
1032 
1033 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
1034 	handle = stack_depot_save(entries, nr_entries, gfp_flags);
1035 
1036 	return handle;
1037 }
1038 #else
1039 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
1040 {
1041 	return 0;
1042 }
1043 #endif
1044 
1045 static void set_track_update(struct kmem_cache *s, void *object,
1046 			     enum track_item alloc, unsigned long addr,
1047 			     depot_stack_handle_t handle)
1048 {
1049 	struct track *p = get_track(s, object, alloc);
1050 
1051 #ifdef CONFIG_STACKDEPOT
1052 	p->handle = handle;
1053 #endif
1054 	p->addr = addr;
1055 	p->cpu = raw_smp_processor_id();
1056 	p->pid = current->pid;
1057 	p->when = jiffies;
1058 }
1059 
1060 static __always_inline void set_track(struct kmem_cache *s, void *object,
1061 				      enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
1062 {
1063 	depot_stack_handle_t handle = set_track_prepare(gfp_flags);
1064 
1065 	set_track_update(s, object, alloc, addr, handle);
1066 }
1067 
1068 static void init_tracking(struct kmem_cache *s, void *object)
1069 {
1070 	struct track *p;
1071 
1072 	if (!(s->flags & SLAB_STORE_USER))
1073 		return;
1074 
1075 	p = get_track(s, object, TRACK_ALLOC);
1076 	memset(p, 0, 2*sizeof(struct track));
1077 }
1078 
1079 static void print_track(const char *s, struct track *t, unsigned long pr_time)
1080 {
1081 	depot_stack_handle_t handle __maybe_unused;
1082 
1083 	if (!t->addr)
1084 		return;
1085 
1086 	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
1087 	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
1088 #ifdef CONFIG_STACKDEPOT
1089 	handle = READ_ONCE(t->handle);
1090 	if (handle)
1091 		stack_depot_print(handle);
1092 	else
1093 		pr_err("object allocation/free stack trace missing\n");
1094 #endif
1095 }
1096 
1097 void print_tracking(struct kmem_cache *s, void *object)
1098 {
1099 	unsigned long pr_time = jiffies;
1100 	if (!(s->flags & SLAB_STORE_USER))
1101 		return;
1102 
1103 	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
1104 	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
1105 }
1106 
1107 static void print_slab_info(const struct slab *slab)
1108 {
1109 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
1110 	       slab, slab->objects, slab->inuse, slab->freelist,
1111 	       &slab->flags.f);
1112 }
1113 
1114 void skip_orig_size_check(struct kmem_cache *s, const void *object)
1115 {
1116 	set_orig_size(s, (void *)object, s->object_size);
1117 }
1118 
1119 static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
1120 {
1121 	struct va_format vaf;
1122 	va_list args;
1123 
1124 	va_copy(args, argsp);
1125 	vaf.fmt = fmt;
1126 	vaf.va = &args;
1127 	pr_err("=============================================================================\n");
1128 	pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
1129 	pr_err("-----------------------------------------------------------------------------\n\n");
1130 	va_end(args);
1131 }
1132 
1133 static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
1134 {
1135 	va_list args;
1136 
1137 	va_start(args, fmt);
1138 	__slab_bug(s, fmt, args);
1139 	va_end(args);
1140 }
1141 
1142 __printf(2, 3)
1143 static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
1144 {
1145 	struct va_format vaf;
1146 	va_list args;
1147 
1148 	if (slab_add_kunit_errors())
1149 		return;
1150 
1151 	va_start(args, fmt);
1152 	vaf.fmt = fmt;
1153 	vaf.va = &args;
1154 	pr_err("FIX %s: %pV\n", s->name, &vaf);
1155 	va_end(args);
1156 }
1157 
1158 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
1159 {
1160 	unsigned int off;	/* Offset of last byte */
1161 	u8 *addr = slab_address(slab);
1162 
1163 	print_tracking(s, p);
1164 
1165 	print_slab_info(slab);
1166 
1167 	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
1168 	       p, p - addr, get_freepointer(s, p));
1169 
1170 	if (s->flags & SLAB_RED_ZONE)
1171 		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
1172 			      s->red_left_pad);
1173 	else if (p > addr + 16)
1174 		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
1175 
1176 	print_section(KERN_ERR,         "Object   ", p,
1177 		      min_t(unsigned int, s->object_size, PAGE_SIZE));
1178 	if (s->flags & SLAB_RED_ZONE)
1179 		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
1180 			s->inuse - s->object_size);
1181 
1182 	off = get_info_end(s);
1183 
1184 	if (s->flags & SLAB_STORE_USER)
1185 		off += 2 * sizeof(struct track);
1186 
1187 	if (slub_debug_orig_size(s))
1188 		off += sizeof(unsigned long);
1189 
1190 	off += kasan_metadata_size(s, false);
1191 
1192 	if (obj_exts_in_object(s, slab))
1193 		off += sizeof(struct slabobj_ext);
1194 
1195 	if (off != size_from_object(s))
1196 		/* Beginning of the filler is the free pointer */
1197 		print_section(KERN_ERR, "Padding  ", p + off,
1198 			      size_from_object(s) - off);
1199 }
1200 
1201 static void object_err(struct kmem_cache *s, struct slab *slab,
1202 			u8 *object, const char *reason)
1203 {
1204 	if (slab_add_kunit_errors())
1205 		return;
1206 
1207 	slab_bug(s, reason);
1208 	if (!object || !check_valid_pointer(s, slab, object)) {
1209 		print_slab_info(slab);
1210 		pr_err("Invalid pointer 0x%p\n", object);
1211 	} else {
1212 		print_trailer(s, slab, object);
1213 	}
1214 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1215 
1216 	WARN_ON(1);
1217 }
1218 
1219 static void __slab_err(struct slab *slab)
1220 {
1221 	if (slab_in_kunit_test())
1222 		return;
1223 
1224 	print_slab_info(slab);
1225 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1226 
1227 	WARN_ON(1);
1228 }
1229 
1230 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
1231 			const char *fmt, ...)
1232 {
1233 	va_list args;
1234 
1235 	if (slab_add_kunit_errors())
1236 		return;
1237 
1238 	va_start(args, fmt);
1239 	__slab_bug(s, fmt, args);
1240 	va_end(args);
1241 
1242 	__slab_err(slab);
1243 }
1244 
1245 static void init_object(struct kmem_cache *s, void *object, u8 val)
1246 {
1247 	u8 *p = kasan_reset_tag(object);
1248 	unsigned int poison_size = s->object_size;
1249 
1250 	if (s->flags & SLAB_RED_ZONE) {
1251 		/*
1252 		 * Here and below, avoid overwriting the KMSAN shadow. Keeping
1253 		 * the shadow makes it possible to distinguish uninit-value
1254 		 * from use-after-free.
1255 		 */
1256 		memset_no_sanitize_memory(p - s->red_left_pad, val,
1257 					  s->red_left_pad);
1258 
1259 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1260 			/*
1261 			 * Redzone the extra allocated space by kmalloc than
1262 			 * requested, and the poison size will be limited to
1263 			 * the original request size accordingly.
1264 			 */
1265 			poison_size = get_orig_size(s, object);
1266 		}
1267 	}
1268 
1269 	if (s->flags & __OBJECT_POISON) {
1270 		memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
1271 		memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
1272 	}
1273 
1274 	if (s->flags & SLAB_RED_ZONE)
1275 		memset_no_sanitize_memory(p + poison_size, val,
1276 					  s->inuse - poison_size);
1277 }
1278 
1279 static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
1280 						void *from, void *to)
1281 {
1282 	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1283 	memset(from, data, to - from);
1284 }
1285 
1286 #ifdef CONFIG_KMSAN
1287 #define pad_check_attributes noinline __no_kmsan_checks
1288 #else
1289 #define pad_check_attributes
1290 #endif
1291 
1292 static pad_check_attributes int
1293 check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
1294 		       u8 *object, const char *what, u8 *start, unsigned int value,
1295 		       unsigned int bytes, bool slab_obj_print)
1296 {
1297 	u8 *fault;
1298 	u8 *end;
1299 	u8 *addr = slab_address(slab);
1300 
1301 	metadata_access_enable();
1302 	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
1303 	metadata_access_disable();
1304 	if (!fault)
1305 		return 1;
1306 
1307 	end = start + bytes;
1308 	while (end > fault && end[-1] == value)
1309 		end--;
1310 
1311 	if (slab_add_kunit_errors())
1312 		goto skip_bug_print;
1313 
1314 	pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1315 	       what, fault, end - 1, fault - addr, fault[0], value);
1316 
1317 	if (slab_obj_print)
1318 		object_err(s, slab, object, "Object corrupt");
1319 
1320 skip_bug_print:
1321 	restore_bytes(s, what, value, fault, end);
1322 	return 0;
1323 }
1324 
1325 /*
1326  * Object field layout:
1327  *
1328  * [Left redzone padding] (if SLAB_RED_ZONE)
1329  *   - Field size: s->red_left_pad
1330  *   - Immediately precedes each object when SLAB_RED_ZONE is set.
1331  *   - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
1332  *     0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
1333  *
1334  * [Object bytes] (object address starts here)
1335  *   - Field size: s->object_size
1336  *   - Object payload bytes.
1337  *   - If the freepointer may overlap the object, it is stored inside
1338  *     the object (typically near the middle).
1339  *   - Poisoning uses 0x6b (POISON_FREE) and the last byte is
1340  *     0xa5 (POISON_END) when __OBJECT_POISON is enabled.
1341  *
1342  * [Word-align padding] (right redzone when SLAB_RED_ZONE is set)
1343  *   - Field size: s->inuse - s->object_size
1344  *   - If redzoning is enabled and ALIGN(size, sizeof(void *)) adds no
1345  *     padding, explicitly extend by one word so the right redzone is
1346  *     non-empty.
1347  *   - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
1348  *     0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
1349  *
1350  * [Metadata starts at object + s->inuse]
1351  *   - A. freelist pointer (if freeptr_outside_object)
1352  *   - B. alloc tracking (SLAB_STORE_USER)
1353  *   - C. free tracking (SLAB_STORE_USER)
1354  *   - D. original request size (SLAB_KMALLOC && SLAB_STORE_USER)
1355  *   - E. KASAN metadata (if enabled)
1356  *
1357  * [Mandatory padding] (if CONFIG_SLUB_DEBUG && SLAB_RED_ZONE)
1358  *   - One mandatory debug word to guarantee a minimum poisoned gap
1359  *     between metadata and the next object, independent of alignment.
1360  *   - Filled with 0x5a (POISON_INUSE) when SLAB_POISON is set.
1361  * [Final alignment padding]
1362  *   - Bytes added by ALIGN(size, s->align) to reach s->size.
1363  *   - When the padding is large enough, it can be used to store
1364  *     struct slabobj_ext for accounting metadata (obj_exts_in_object()).
1365  *   - The remaining bytes (if any) are filled with 0x5a (POISON_INUSE)
1366  *     when SLAB_POISON is set.
1367  *
1368  * Notes:
1369  * - Redzones are filled by init_object() with SLUB_RED_ACTIVE/INACTIVE.
1370  * - Object contents are poisoned with POISON_FREE/END when __OBJECT_POISON.
1371  * - The trailing padding is pre-filled with POISON_INUSE by
1372  *   setup_slab_debug() when SLAB_POISON is set, and is validated by
1373  *   check_pad_bytes().
1374  * - The first object pointer is slab_address(slab) +
1375  *   (s->red_left_pad if redzoning); subsequent objects are reached by
1376  *   adding s->size each time.
1377  *
1378  * If a slab cache flag relies on specific metadata to exist at a fixed
1379  * offset, the flag must be included in SLAB_NEVER_MERGE to prevent merging.
1380  * Otherwise, the cache would misbehave as s->object_size and s->inuse are
1381  * adjusted during cache merging (see __kmem_cache_alias()).
1382  */
1383 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
1384 {
1385 	unsigned long off = get_info_end(s);	/* The end of info */
1386 
1387 	if (s->flags & SLAB_STORE_USER) {
1388 		/* We also have user information there */
1389 		off += 2 * sizeof(struct track);
1390 
1391 		if (s->flags & SLAB_KMALLOC)
1392 			off += sizeof(unsigned long);
1393 	}
1394 
1395 	off += kasan_metadata_size(s, false);
1396 
1397 	if (obj_exts_in_object(s, slab))
1398 		off += sizeof(struct slabobj_ext);
1399 
1400 	if (size_from_object(s) == off)
1401 		return 1;
1402 
1403 	return check_bytes_and_report(s, slab, p, "Object padding",
1404 			p + off, POISON_INUSE, size_from_object(s) - off, true);
1405 }
1406 
1407 /* Check the pad bytes at the end of a slab page */
1408 static pad_check_attributes void
1409 slab_pad_check(struct kmem_cache *s, struct slab *slab)
1410 {
1411 	u8 *start;
1412 	u8 *fault;
1413 	u8 *end;
1414 	u8 *pad;
1415 	int length;
1416 	int remainder;
1417 
1418 	if (!(s->flags & SLAB_POISON))
1419 		return;
1420 
1421 	start = slab_address(slab);
1422 	length = slab_size(slab);
1423 	end = start + length;
1424 
1425 	if (obj_exts_in_slab(s, slab) && !obj_exts_in_object(s, slab)) {
1426 		remainder = length;
1427 		remainder -= obj_exts_offset_in_slab(s, slab);
1428 		remainder -= obj_exts_size_in_slab(slab);
1429 	} else {
1430 		remainder = length % s->size;
1431 	}
1432 
1433 	if (!remainder)
1434 		return;
1435 
1436 	pad = end - remainder;
1437 	metadata_access_enable();
1438 	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1439 	metadata_access_disable();
1440 	if (!fault)
1441 		return;
1442 	while (end > fault && end[-1] == POISON_INUSE)
1443 		end--;
1444 
1445 	slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1446 		 fault, end - 1, fault - start);
1447 	print_section(KERN_ERR, "Padding ", pad, remainder);
1448 	__slab_err(slab);
1449 
1450 	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
1451 }
1452 
1453 static int check_object(struct kmem_cache *s, struct slab *slab,
1454 					void *object, u8 val)
1455 {
1456 	u8 *p = object;
1457 	u8 *endobject = object + s->object_size;
1458 	unsigned int orig_size, kasan_meta_size;
1459 	int ret = 1;
1460 
1461 	if (s->flags & SLAB_RED_ZONE) {
1462 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1463 			object - s->red_left_pad, val, s->red_left_pad, ret))
1464 			ret = 0;
1465 
1466 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1467 			endobject, val, s->inuse - s->object_size, ret))
1468 			ret = 0;
1469 
1470 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1471 			orig_size = get_orig_size(s, object);
1472 
1473 			if (s->object_size > orig_size  &&
1474 				!check_bytes_and_report(s, slab, object,
1475 					"kmalloc Redzone", p + orig_size,
1476 					val, s->object_size - orig_size, ret)) {
1477 				ret = 0;
1478 			}
1479 		}
1480 	} else {
1481 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1482 			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
1483 				endobject, POISON_INUSE,
1484 				s->inuse - s->object_size, ret))
1485 				ret = 0;
1486 		}
1487 	}
1488 
1489 	if (s->flags & SLAB_POISON) {
1490 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1491 			/*
1492 			 * KASAN can save its free meta data inside of the
1493 			 * object at offset 0. Thus, skip checking the part of
1494 			 * the redzone that overlaps with the meta data.
1495 			 */
1496 			kasan_meta_size = kasan_metadata_size(s, true);
1497 			if (kasan_meta_size < s->object_size - 1 &&
1498 			    !check_bytes_and_report(s, slab, p, "Poison",
1499 					p + kasan_meta_size, POISON_FREE,
1500 					s->object_size - kasan_meta_size - 1, ret))
1501 				ret = 0;
1502 			if (kasan_meta_size < s->object_size &&
1503 			    !check_bytes_and_report(s, slab, p, "End Poison",
1504 					p + s->object_size - 1, POISON_END, 1, ret))
1505 				ret = 0;
1506 		}
1507 		/*
1508 		 * check_pad_bytes cleans up on its own.
1509 		 */
1510 		if (!check_pad_bytes(s, slab, p))
1511 			ret = 0;
1512 	}
1513 
1514 	/*
1515 	 * Cannot check freepointer while object is allocated if
1516 	 * object and freepointer overlap.
1517 	 */
1518 	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
1519 	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
1520 		object_err(s, slab, p, "Freepointer corrupt");
1521 		/*
1522 		 * No choice but to zap it and thus lose the remainder
1523 		 * of the free objects in this slab. May cause
1524 		 * another error because the object count is now wrong.
1525 		 */
1526 		set_freepointer(s, p, NULL);
1527 		ret = 0;
1528 	}
1529 
1530 	return ret;
1531 }
1532 
1533 /*
1534  * Checks if the slab state looks sane. Assumes the struct slab pointer
1535  * was either obtained in a way that ensures it's valid, or validated
1536  * by validate_slab_ptr()
1537  */
1538 static int check_slab(struct kmem_cache *s, struct slab *slab)
1539 {
1540 	int maxobj;
1541 
1542 	maxobj = order_objects(slab_order(slab), s->size);
1543 	if (slab->objects > maxobj) {
1544 		slab_err(s, slab, "objects %u > max %u",
1545 			slab->objects, maxobj);
1546 		return 0;
1547 	}
1548 	if (slab->inuse > slab->objects) {
1549 		slab_err(s, slab, "inuse %u > max %u",
1550 			slab->inuse, slab->objects);
1551 		return 0;
1552 	}
1553 	if (slab->frozen) {
1554 		slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
1555 		return 0;
1556 	}
1557 
1558 	/* Slab_pad_check fixes things up after itself */
1559 	slab_pad_check(s, slab);
1560 	return 1;
1561 }
1562 
1563 /*
1564  * Determine if a certain object in a slab is on the freelist. Must hold the
1565  * slab lock to guarantee that the chains are in a consistent state.
1566  */
1567 static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
1568 {
1569 	int nr = 0;
1570 	void *fp;
1571 	void *object = NULL;
1572 	int max_objects;
1573 
1574 	fp = slab->freelist;
1575 	while (fp && nr <= slab->objects) {
1576 		if (fp == search)
1577 			return true;
1578 		if (!check_valid_pointer(s, slab, fp)) {
1579 			if (object) {
1580 				object_err(s, slab, object,
1581 					"Freechain corrupt");
1582 				set_freepointer(s, object, NULL);
1583 				break;
1584 			} else {
1585 				slab_err(s, slab, "Freepointer corrupt");
1586 				slab->freelist = NULL;
1587 				slab->inuse = slab->objects;
1588 				slab_fix(s, "Freelist cleared");
1589 				return false;
1590 			}
1591 		}
1592 		object = fp;
1593 		fp = get_freepointer(s, object);
1594 		nr++;
1595 	}
1596 
1597 	if (nr > slab->objects) {
1598 		slab_err(s, slab, "Freelist cycle detected");
1599 		slab->freelist = NULL;
1600 		slab->inuse = slab->objects;
1601 		slab_fix(s, "Freelist cleared");
1602 		return false;
1603 	}
1604 
1605 	max_objects = order_objects(slab_order(slab), s->size);
1606 	if (max_objects > MAX_OBJS_PER_PAGE)
1607 		max_objects = MAX_OBJS_PER_PAGE;
1608 
1609 	if (slab->objects != max_objects) {
1610 		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1611 			 slab->objects, max_objects);
1612 		slab->objects = max_objects;
1613 		slab_fix(s, "Number of objects adjusted");
1614 	}
1615 	if (slab->inuse != slab->objects - nr) {
1616 		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
1617 			 slab->inuse, slab->objects - nr);
1618 		slab->inuse = slab->objects - nr;
1619 		slab_fix(s, "Object count adjusted");
1620 	}
1621 	return search == NULL;
1622 }
1623 
1624 static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1625 								int alloc)
1626 {
1627 	if (s->flags & SLAB_TRACE) {
1628 		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1629 			s->name,
1630 			alloc ? "alloc" : "free",
1631 			object, slab->inuse,
1632 			slab->freelist);
1633 
1634 		if (!alloc)
1635 			print_section(KERN_INFO, "Object ", (void *)object,
1636 					s->object_size);
1637 
1638 		dump_stack();
1639 	}
1640 }
1641 
1642 /*
1643  * Tracking of fully allocated slabs for debugging purposes.
1644  */
1645 static void add_full(struct kmem_cache *s,
1646 	struct kmem_cache_node *n, struct slab *slab)
1647 {
1648 	if (!(s->flags & SLAB_STORE_USER))
1649 		return;
1650 
1651 	lockdep_assert_held(&n->list_lock);
1652 	list_add(&slab->slab_list, &n->full);
1653 }
1654 
1655 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
1656 {
1657 	if (!(s->flags & SLAB_STORE_USER))
1658 		return;
1659 
1660 	lockdep_assert_held(&n->list_lock);
1661 	list_del(&slab->slab_list);
1662 }
1663 
1664 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1665 {
1666 	return atomic_long_read(&n->nr_slabs);
1667 }
1668 
1669 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1670 {
1671 	struct kmem_cache_node *n = get_node(s, node);
1672 
1673 	atomic_long_inc(&n->nr_slabs);
1674 	atomic_long_add(objects, &n->total_objects);
1675 }
1676 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1677 {
1678 	struct kmem_cache_node *n = get_node(s, node);
1679 
1680 	atomic_long_dec(&n->nr_slabs);
1681 	atomic_long_sub(objects, &n->total_objects);
1682 }
1683 
1684 /* Object debug checks for alloc/free paths */
1685 static void setup_object_debug(struct kmem_cache *s, void *object)
1686 {
1687 	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1688 		return;
1689 
1690 	init_object(s, object, SLUB_RED_INACTIVE);
1691 	init_tracking(s, object);
1692 }
1693 
1694 static
1695 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
1696 {
1697 	if (!kmem_cache_debug_flags(s, SLAB_POISON))
1698 		return;
1699 
1700 	metadata_access_enable();
1701 	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
1702 	metadata_access_disable();
1703 }
1704 
1705 static inline int alloc_consistency_checks(struct kmem_cache *s,
1706 					struct slab *slab, void *object)
1707 {
1708 	if (!check_slab(s, slab))
1709 		return 0;
1710 
1711 	if (!check_valid_pointer(s, slab, object)) {
1712 		object_err(s, slab, object, "Freelist Pointer check fails");
1713 		return 0;
1714 	}
1715 
1716 	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1717 		return 0;
1718 
1719 	return 1;
1720 }
1721 
1722 static noinline bool alloc_debug_processing(struct kmem_cache *s,
1723 			struct slab *slab, void *object, int orig_size)
1724 {
1725 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1726 		if (!alloc_consistency_checks(s, slab, object))
1727 			goto bad;
1728 	}
1729 
1730 	/* Success. Perform special debug activities for allocs */
1731 	trace(s, slab, object, 1);
1732 	set_orig_size(s, object, orig_size);
1733 	init_object(s, object, SLUB_RED_ACTIVE);
1734 	return true;
1735 
1736 bad:
1737 	/*
1738 	 * Let's do the best we can to avoid issues in the future. Marking all
1739 	 * objects as used avoids touching the remaining objects.
1740 	 */
1741 	slab_fix(s, "Marking all objects used");
1742 	slab->inuse = slab->objects;
1743 	slab->freelist = NULL;
1744 	slab->frozen = 1; /* mark consistency-failed slab as frozen */
1745 
1746 	return false;
1747 }
1748 
1749 static inline int free_consistency_checks(struct kmem_cache *s,
1750 		struct slab *slab, void *object, unsigned long addr)
1751 {
1752 	if (!check_valid_pointer(s, slab, object)) {
1753 		slab_err(s, slab, "Invalid object pointer 0x%p", object);
1754 		return 0;
1755 	}
1756 
1757 	if (on_freelist(s, slab, object)) {
1758 		object_err(s, slab, object, "Object already free");
1759 		return 0;
1760 	}
1761 
1762 	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1763 		return 0;
1764 
1765 	if (unlikely(s != slab->slab_cache)) {
1766 		if (!slab->slab_cache) {
1767 			slab_err(NULL, slab, "No slab cache for object 0x%p",
1768 				 object);
1769 		} else {
1770 			object_err(s, slab, object,
1771 				   "page slab pointer corrupt.");
1772 		}
1773 		return 0;
1774 	}
1775 	return 1;
1776 }
1777 
1778 /*
1779  * Parse a block of slab_debug options. Blocks are delimited by ';'
1780  *
1781  * @str:    start of block
1782  * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1783  * @slabs:  return start of list of slabs, or NULL when there's no list
1784  * @init:   assume this is initial parsing and not per-kmem-create parsing
1785  *
1786  * returns the start of next block if there's any, or NULL
1787  */
1788 static const char *
1789 parse_slub_debug_flags(const char *str, slab_flags_t *flags, const char **slabs, bool init)
1790 {
1791 	bool higher_order_disable = false;
1792 
1793 	/* Skip any completely empty blocks */
1794 	while (*str && *str == ';')
1795 		str++;
1796 
1797 	if (*str == ',') {
1798 		/*
1799 		 * No options but restriction on slabs. This means full
1800 		 * debugging for slabs matching a pattern.
1801 		 */
1802 		*flags = DEBUG_DEFAULT_FLAGS;
1803 		goto check_slabs;
1804 	}
1805 	*flags = 0;
1806 
1807 	/* Determine which debug features should be switched on */
1808 	for (; *str && *str != ',' && *str != ';'; str++) {
1809 		switch (tolower(*str)) {
1810 		case '-':
1811 			*flags = 0;
1812 			break;
1813 		case 'f':
1814 			*flags |= SLAB_CONSISTENCY_CHECKS;
1815 			break;
1816 		case 'z':
1817 			*flags |= SLAB_RED_ZONE;
1818 			break;
1819 		case 'p':
1820 			*flags |= SLAB_POISON;
1821 			break;
1822 		case 'u':
1823 			*flags |= SLAB_STORE_USER;
1824 			break;
1825 		case 't':
1826 			*flags |= SLAB_TRACE;
1827 			break;
1828 		case 'a':
1829 			*flags |= SLAB_FAILSLAB;
1830 			break;
1831 		case 'o':
1832 			/*
1833 			 * Avoid enabling debugging on caches if its minimum
1834 			 * order would increase as a result.
1835 			 */
1836 			higher_order_disable = true;
1837 			break;
1838 		default:
1839 			if (init)
1840 				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
1841 		}
1842 	}
1843 check_slabs:
1844 	if (*str == ',')
1845 		*slabs = ++str;
1846 	else
1847 		*slabs = NULL;
1848 
1849 	/* Skip over the slab list */
1850 	while (*str && *str != ';')
1851 		str++;
1852 
1853 	/* Skip any completely empty blocks */
1854 	while (*str && *str == ';')
1855 		str++;
1856 
1857 	if (init && higher_order_disable)
1858 		disable_higher_order_debug = 1;
1859 
1860 	if (*str)
1861 		return str;
1862 	else
1863 		return NULL;
1864 }
1865 
1866 static int __init setup_slub_debug(const char *str, const struct kernel_param *kp)
1867 {
1868 	slab_flags_t flags;
1869 	slab_flags_t global_flags;
1870 	const char *saved_str;
1871 	const char *slab_list;
1872 	bool global_slub_debug_changed = false;
1873 	bool slab_list_specified = false;
1874 
1875 	global_flags = DEBUG_DEFAULT_FLAGS;
1876 	if (!str || !*str)
1877 		/*
1878 		 * No options specified. Switch on full debugging.
1879 		 */
1880 		goto out;
1881 
1882 	saved_str = str;
1883 	while (str) {
1884 		str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1885 
1886 		if (!slab_list) {
1887 			global_flags = flags;
1888 			global_slub_debug_changed = true;
1889 		} else {
1890 			slab_list_specified = true;
1891 			if (flags & SLAB_STORE_USER)
1892 				stack_depot_request_early_init();
1893 		}
1894 	}
1895 
1896 	/*
1897 	 * For backwards compatibility, a single list of flags with list of
1898 	 * slabs means debugging is only changed for those slabs, so the global
1899 	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1900 	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
1901 	 * long as there is no option specifying flags without a slab list.
1902 	 */
1903 	if (slab_list_specified) {
1904 		if (!global_slub_debug_changed)
1905 			global_flags = slub_debug;
1906 		slub_debug_string = saved_str;
1907 	}
1908 out:
1909 	slub_debug = global_flags;
1910 	if (slub_debug & SLAB_STORE_USER)
1911 		stack_depot_request_early_init();
1912 	if (slub_debug != 0 || slub_debug_string)
1913 		static_branch_enable(&slub_debug_enabled);
1914 	else
1915 		static_branch_disable(&slub_debug_enabled);
1916 	if ((static_branch_unlikely(&init_on_alloc) ||
1917 	     static_branch_unlikely(&init_on_free)) &&
1918 	    (slub_debug & SLAB_POISON))
1919 		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1920 	return 0;
1921 }
1922 
1923 static const struct kernel_param_ops param_ops_slab_debug __initconst = {
1924 	.flags = KERNEL_PARAM_OPS_FL_NOARG,
1925 	.set = setup_slub_debug,
1926 };
1927 __core_param_cb(slab_debug, &param_ops_slab_debug, NULL, 0);
1928 __core_param_cb(slub_debug, &param_ops_slab_debug, NULL, 0);
1929 
1930 /*
1931  * kmem_cache_flags - apply debugging options to the cache
1932  * @flags:		flags to set
1933  * @name:		name of the cache
1934  *
1935  * Debug option(s) are applied to @flags. In addition to the debug
1936  * option(s), if a slab name (or multiple) is specified i.e.
1937  * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1938  * then only the select slabs will receive the debug option(s).
1939  */
1940 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1941 {
1942 	const char *iter;
1943 	size_t len;
1944 	const char *next_block;
1945 	slab_flags_t block_flags;
1946 	slab_flags_t slub_debug_local = slub_debug;
1947 
1948 	if (flags & SLAB_NO_USER_FLAGS)
1949 		return flags;
1950 
1951 	/*
1952 	 * If the slab cache is for debugging (e.g. kmemleak) then
1953 	 * don't store user (stack trace) information by default,
1954 	 * but let the user enable it via the command line below.
1955 	 */
1956 	if (flags & SLAB_NOLEAKTRACE)
1957 		slub_debug_local &= ~SLAB_STORE_USER;
1958 
1959 	len = strlen(name);
1960 	next_block = slub_debug_string;
1961 	/* Go through all blocks of debug options, see if any matches our slab's name */
1962 	while (next_block) {
1963 		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1964 		if (!iter)
1965 			continue;
1966 		/* Found a block that has a slab list, search it */
1967 		while (*iter) {
1968 			const char *end, *glob;
1969 			size_t cmplen;
1970 
1971 			end = strchrnul(iter, ',');
1972 			if (next_block && next_block < end)
1973 				end = next_block - 1;
1974 
1975 			glob = strnchr(iter, end - iter, '*');
1976 			if (glob)
1977 				cmplen = glob - iter;
1978 			else
1979 				cmplen = max_t(size_t, len, (end - iter));
1980 
1981 			if (!strncmp(name, iter, cmplen)) {
1982 				flags |= block_flags;
1983 				return flags;
1984 			}
1985 
1986 			if (!*end || *end == ';')
1987 				break;
1988 			iter = end + 1;
1989 		}
1990 	}
1991 
1992 	return flags | slub_debug_local;
1993 }
1994 #else /* !CONFIG_SLUB_DEBUG */
1995 static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
1996 static inline
1997 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
1998 
1999 static inline bool alloc_debug_processing(struct kmem_cache *s,
2000 	struct slab *slab, void *object, int orig_size) { return true; }
2001 
2002 static inline bool free_debug_processing(struct kmem_cache *s,
2003 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
2004 	unsigned long addr, depot_stack_handle_t handle) { return true; }
2005 
2006 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
2007 static inline int check_object(struct kmem_cache *s, struct slab *slab,
2008 			void *object, u8 val) { return 1; }
2009 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
2010 static inline void set_track(struct kmem_cache *s, void *object,
2011 			     enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
2012 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
2013 					struct slab *slab) {}
2014 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
2015 					struct slab *slab) {}
2016 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
2017 {
2018 	return flags;
2019 }
2020 #define slub_debug 0
2021 
2022 #define disable_higher_order_debug 0
2023 
2024 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
2025 							{ return 0; }
2026 static inline void inc_slabs_node(struct kmem_cache *s, int node,
2027 							int objects) {}
2028 static inline void dec_slabs_node(struct kmem_cache *s, int node,
2029 							int objects) {}
2030 #endif /* CONFIG_SLUB_DEBUG */
2031 
2032 /*
2033  * The allocated objcg pointers array is not accounted directly.
2034  * Moreover, it should not come from DMA buffer and is not readily
2035  * reclaimable. So those GFP bits should be masked off.
2036  */
2037 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
2038 				__GFP_ACCOUNT | __GFP_NOFAIL)
2039 
2040 #ifdef CONFIG_SLAB_OBJ_EXT
2041 
2042 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
2043 
2044 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
2045 {
2046 	struct slab *obj_exts_slab;
2047 	unsigned long slab_exts;
2048 
2049 	obj_exts_slab = virt_to_slab(obj_exts);
2050 	slab_exts = slab_obj_exts(obj_exts_slab);
2051 	if (slab_exts) {
2052 		get_slab_obj_exts(slab_exts);
2053 		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
2054 						 obj_exts_slab, obj_exts);
2055 		struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
2056 						       slab_exts, offs);
2057 
2058 		if (unlikely(is_codetag_empty(&ext->ref))) {
2059 			put_slab_obj_exts(slab_exts);
2060 			return;
2061 		}
2062 
2063 		/* codetag should be NULL here */
2064 		WARN_ON(ext->ref.ct);
2065 		set_codetag_empty(&ext->ref);
2066 		put_slab_obj_exts(slab_exts);
2067 	}
2068 }
2069 
2070 static inline bool mark_failed_objexts_alloc(struct slab *slab)
2071 {
2072 	return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
2073 }
2074 
2075 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2076 			struct slabobj_ext *vec, unsigned int objects)
2077 {
2078 	/*
2079 	 * If vector previously failed to allocate then we have live
2080 	 * objects with no tag reference. Mark all references in this
2081 	 * vector as empty to avoid warnings later on.
2082 	 */
2083 	if (obj_exts == OBJEXTS_ALLOC_FAIL) {
2084 		unsigned int i;
2085 
2086 		for (i = 0; i < objects; i++)
2087 			set_codetag_empty(&vec[i].ref);
2088 	}
2089 }
2090 
2091 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2092 
2093 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
2094 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
2095 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2096 			struct slabobj_ext *vec, unsigned int objects) {}
2097 
2098 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2099 
2100 static inline void init_slab_obj_exts(struct slab *slab)
2101 {
2102 	slab->obj_exts = 0;
2103 }
2104 
2105 /*
2106  * Calculate the allocation size for slabobj_ext array.
2107  *
2108  * When memory allocation profiling is enabled, the obj_exts array
2109  * could be allocated from the same slab cache it's being allocated for.
2110  * This would prevent the slab from ever being freed because it would
2111  * always contain at least one allocated object (its own obj_exts array).
2112  *
2113  * To avoid this, increase the allocation size when we detect the array
2114  * may come from the same cache, forcing it to use a different cache.
2115  */
2116 static inline size_t obj_exts_alloc_size(struct kmem_cache *s,
2117 					 struct slab *slab, gfp_t gfp)
2118 {
2119 	size_t sz = sizeof(struct slabobj_ext) * slab->objects;
2120 	struct kmem_cache *obj_exts_cache;
2121 
2122 	/*
2123 	 * slabobj_ext array for KMALLOC_CGROUP allocations
2124 	 * are served from KMALLOC_NORMAL caches.
2125 	 */
2126 	if (!mem_alloc_profiling_enabled())
2127 		return sz;
2128 
2129 	if (sz > KMALLOC_MAX_CACHE_SIZE)
2130 		return sz;
2131 
2132 	if (!is_kmalloc_normal(s))
2133 		return sz;
2134 
2135 	obj_exts_cache = kmalloc_slab(sz, NULL, gfp, 0);
2136 	/*
2137 	 * We can't simply compare s with obj_exts_cache, because random kmalloc
2138 	 * caches have multiple caches per size, selected by caller address.
2139 	 * Since caller address may differ between kmalloc_slab() and actual
2140 	 * allocation, bump size when sizes are equal.
2141 	 */
2142 	if (s->object_size == obj_exts_cache->object_size)
2143 		return obj_exts_cache->object_size + 1;
2144 
2145 	return sz;
2146 }
2147 
2148 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2149 		        gfp_t gfp, bool new_slab)
2150 {
2151 	bool allow_spin = gfpflags_allow_spinning(gfp);
2152 	unsigned int objects = objs_per_slab(s, slab);
2153 	unsigned long new_exts;
2154 	unsigned long old_exts;
2155 	struct slabobj_ext *vec;
2156 	size_t sz;
2157 
2158 	gfp &= ~OBJCGS_CLEAR_MASK;
2159 	/* Prevent recursive extension vector allocation */
2160 	gfp |= __GFP_NO_OBJ_EXT;
2161 
2162 	sz = obj_exts_alloc_size(s, slab, gfp);
2163 
2164 	/*
2165 	 * Note that allow_spin may be false during early boot and its
2166 	 * restricted GFP_BOOT_MASK. Due to kmalloc_nolock() only supporting
2167 	 * architectures with cmpxchg16b, early obj_exts will be missing for
2168 	 * very early allocations on those.
2169 	 */
2170 	if (unlikely(!allow_spin))
2171 		vec = kmalloc_nolock(sz, __GFP_ZERO | __GFP_NO_OBJ_EXT,
2172 				     slab_nid(slab));
2173 	else
2174 		vec = kmalloc_node(sz, gfp | __GFP_ZERO, slab_nid(slab));
2175 
2176 	if (!vec) {
2177 		/*
2178 		 * Try to mark vectors which failed to allocate.
2179 		 * If this operation fails, there may be a racing process
2180 		 * that has already completed the allocation.
2181 		 */
2182 		if (!mark_failed_objexts_alloc(slab) &&
2183 		    slab_obj_exts(slab))
2184 			return 0;
2185 
2186 		return -ENOMEM;
2187 	}
2188 
2189 	VM_WARN_ON_ONCE(virt_to_slab(vec) != NULL &&
2190 			virt_to_slab(vec)->slab_cache == s);
2191 
2192 	new_exts = (unsigned long)vec;
2193 #ifdef CONFIG_MEMCG
2194 	new_exts |= MEMCG_DATA_OBJEXTS;
2195 #endif
2196 retry:
2197 	old_exts = READ_ONCE(slab->obj_exts);
2198 	handle_failed_objexts_alloc(old_exts, vec, objects);
2199 	slab_set_stride(slab, sizeof(struct slabobj_ext));
2200 
2201 	if (new_slab) {
2202 		/*
2203 		 * If the slab is brand new and nobody can yet access its
2204 		 * obj_exts, no synchronization is required and obj_exts can
2205 		 * be simply assigned.
2206 		 */
2207 		slab->obj_exts = new_exts;
2208 	} else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
2209 		/*
2210 		 * If the slab is already in use, somebody can allocate and
2211 		 * assign slabobj_exts in parallel. In this case the existing
2212 		 * objcg vector should be reused.
2213 		 */
2214 		mark_objexts_empty(vec);
2215 		if (unlikely(!allow_spin))
2216 			kfree_nolock(vec);
2217 		else
2218 			kfree(vec);
2219 		return 0;
2220 	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
2221 		/* Retry if a racing thread changed slab->obj_exts from under us. */
2222 		goto retry;
2223 	}
2224 
2225 	if (allow_spin)
2226 		kmemleak_not_leak(vec);
2227 	return 0;
2228 }
2229 
2230 static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
2231 {
2232 	struct slabobj_ext *obj_exts;
2233 
2234 	obj_exts = (struct slabobj_ext *)slab_obj_exts(slab);
2235 	if (!obj_exts) {
2236 		/*
2237 		 * If obj_exts allocation failed, slab->obj_exts is set to
2238 		 * OBJEXTS_ALLOC_FAIL. In this case, we end up here and should
2239 		 * clear the flag.
2240 		 */
2241 		slab->obj_exts = 0;
2242 		return;
2243 	}
2244 
2245 	if (obj_exts_in_slab(slab->slab_cache, slab)) {
2246 		slab->obj_exts = 0;
2247 		return;
2248 	}
2249 
2250 	/*
2251 	 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its
2252 	 * corresponding extension will be NULL. alloc_tag_sub() will throw a
2253 	 * warning if slab has extensions but the extension of an object is
2254 	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
2255 	 * the extension for obj_exts is expected to be NULL.
2256 	 */
2257 	mark_objexts_empty(obj_exts);
2258 	if (allow_spin)
2259 		kfree(obj_exts);
2260 	else
2261 		kfree_nolock(obj_exts);
2262 	slab->obj_exts = 0;
2263 }
2264 
2265 /*
2266  * Try to allocate slabobj_ext array from unused space.
2267  * This function must be called on a freshly allocated slab to prevent
2268  * concurrency problems.
2269  */
2270 static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
2271 {
2272 	void *addr;
2273 	unsigned long obj_exts;
2274 
2275 	if (!need_slab_obj_exts(s))
2276 		return;
2277 
2278 	if (obj_exts_fit_within_slab_leftover(s, slab)) {
2279 		addr = slab_address(slab) + obj_exts_offset_in_slab(s, slab);
2280 		addr = kasan_reset_tag(addr);
2281 		obj_exts = (unsigned long)addr;
2282 
2283 		get_slab_obj_exts(obj_exts);
2284 		memset(addr, 0, obj_exts_size_in_slab(slab));
2285 		put_slab_obj_exts(obj_exts);
2286 
2287 #ifdef CONFIG_MEMCG
2288 		obj_exts |= MEMCG_DATA_OBJEXTS;
2289 #endif
2290 		slab->obj_exts = obj_exts;
2291 		slab_set_stride(slab, sizeof(struct slabobj_ext));
2292 	} else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
2293 		unsigned int offset = obj_exts_offset_in_object(s);
2294 
2295 		obj_exts = (unsigned long)slab_address(slab);
2296 		obj_exts += s->red_left_pad;
2297 		obj_exts += offset;
2298 
2299 		get_slab_obj_exts(obj_exts);
2300 		for_each_object(addr, s, slab_address(slab), slab->objects)
2301 			memset(kasan_reset_tag(addr) + offset, 0,
2302 			       sizeof(struct slabobj_ext));
2303 		put_slab_obj_exts(obj_exts);
2304 
2305 #ifdef CONFIG_MEMCG
2306 		obj_exts |= MEMCG_DATA_OBJEXTS;
2307 #endif
2308 		slab->obj_exts = obj_exts;
2309 		slab_set_stride(slab, s->size);
2310 	}
2311 }
2312 
2313 #else /* CONFIG_SLAB_OBJ_EXT */
2314 
2315 static inline void init_slab_obj_exts(struct slab *slab)
2316 {
2317 }
2318 
2319 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2320 			       gfp_t gfp, bool new_slab)
2321 {
2322 	return 0;
2323 }
2324 
2325 static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
2326 {
2327 }
2328 
2329 static inline void alloc_slab_obj_exts_early(struct kmem_cache *s,
2330 						       struct slab *slab)
2331 {
2332 }
2333 
2334 #endif /* CONFIG_SLAB_OBJ_EXT */
2335 
2336 #ifdef CONFIG_MEM_ALLOC_PROFILING
2337 
2338 static inline unsigned long
2339 prepare_slab_obj_exts_hook(struct kmem_cache *s, struct slab *slab,
2340 			   gfp_t flags, void *p)
2341 {
2342 	if (!slab_obj_exts(slab) &&
2343 	    alloc_slab_obj_exts(slab, s, flags, false)) {
2344 		pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
2345 			     __func__, s->name);
2346 		return 0;
2347 	}
2348 
2349 	return slab_obj_exts(slab);
2350 }
2351 
2352 
2353 /* Should be called only if mem_alloc_profiling_enabled() */
2354 static noinline void
2355 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2356 {
2357 	unsigned long obj_exts;
2358 	struct slabobj_ext *obj_ext;
2359 	struct slab *slab;
2360 
2361 	if (!object)
2362 		return;
2363 
2364 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2365 		return;
2366 
2367 	if (flags & __GFP_NO_OBJ_EXT)
2368 		return;
2369 
2370 	slab = virt_to_slab(object);
2371 	obj_exts = prepare_slab_obj_exts_hook(s, slab, flags, object);
2372 	/*
2373 	 * Currently obj_exts is used only for allocation profiling.
2374 	 * If other users appear then mem_alloc_profiling_enabled()
2375 	 * check should be added before alloc_tag_add().
2376 	 */
2377 	if (obj_exts) {
2378 		unsigned int obj_idx = obj_to_index(s, slab, object);
2379 
2380 		get_slab_obj_exts(obj_exts);
2381 		obj_ext = slab_obj_ext(slab, obj_exts, obj_idx);
2382 		alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
2383 		put_slab_obj_exts(obj_exts);
2384 	} else {
2385 		alloc_tag_set_inaccurate(current->alloc_tag);
2386 	}
2387 }
2388 
2389 static inline void
2390 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2391 {
2392 	if (mem_alloc_profiling_enabled())
2393 		__alloc_tagging_slab_alloc_hook(s, object, flags);
2394 }
2395 
2396 /* Should be called only if mem_alloc_profiling_enabled() */
2397 static noinline void
2398 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2399 			       int objects)
2400 {
2401 	int i;
2402 	unsigned long obj_exts;
2403 
2404 	/* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
2405 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2406 		return;
2407 
2408 	obj_exts = slab_obj_exts(slab);
2409 	if (!obj_exts)
2410 		return;
2411 
2412 	get_slab_obj_exts(obj_exts);
2413 	for (i = 0; i < objects; i++) {
2414 		unsigned int off = obj_to_index(s, slab, p[i]);
2415 
2416 		alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
2417 	}
2418 	put_slab_obj_exts(obj_exts);
2419 }
2420 
2421 static inline void
2422 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2423 			     int objects)
2424 {
2425 	if (mem_alloc_profiling_enabled())
2426 		__alloc_tagging_slab_free_hook(s, slab, p, objects);
2427 }
2428 
2429 #else /* CONFIG_MEM_ALLOC_PROFILING */
2430 
2431 static inline void
2432 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2433 {
2434 }
2435 
2436 static inline void
2437 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2438 			     int objects)
2439 {
2440 }
2441 
2442 #endif /* CONFIG_MEM_ALLOC_PROFILING */
2443 
2444 
2445 #ifdef CONFIG_MEMCG
2446 
2447 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
2448 
2449 static __fastpath_inline
2450 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2451 				gfp_t flags, size_t size, void **p)
2452 {
2453 	if (likely(!memcg_kmem_online()))
2454 		return true;
2455 
2456 	if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
2457 		return true;
2458 
2459 	if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
2460 		return true;
2461 
2462 	if (likely(size == 1)) {
2463 		memcg_alloc_abort_single(s, *p);
2464 		*p = NULL;
2465 	} else {
2466 		kmem_cache_free_bulk(s, size, p);
2467 	}
2468 
2469 	return false;
2470 }
2471 
2472 static __fastpath_inline
2473 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2474 			  int objects)
2475 {
2476 	unsigned long obj_exts;
2477 
2478 	if (!memcg_kmem_online())
2479 		return;
2480 
2481 	obj_exts = slab_obj_exts(slab);
2482 	if (likely(!obj_exts))
2483 		return;
2484 
2485 	get_slab_obj_exts(obj_exts);
2486 	__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
2487 	put_slab_obj_exts(obj_exts);
2488 }
2489 
2490 static __fastpath_inline
2491 bool memcg_slab_post_charge(void *p, gfp_t flags)
2492 {
2493 	unsigned long obj_exts;
2494 	struct slabobj_ext *obj_ext;
2495 	struct kmem_cache *s;
2496 	struct page *page;
2497 	struct slab *slab;
2498 	unsigned long off;
2499 
2500 	page = virt_to_page(p);
2501 	if (PageLargeKmalloc(page)) {
2502 		unsigned int order;
2503 		int size;
2504 
2505 		if (PageMemcgKmem(page))
2506 			return true;
2507 
2508 		order = large_kmalloc_order(page);
2509 		if (__memcg_kmem_charge_page(page, flags, order))
2510 			return false;
2511 
2512 		/*
2513 		 * This page has already been accounted in the global stats but
2514 		 * not in the memcg stats. So, subtract from the global and use
2515 		 * the interface which adds to both global and memcg stats.
2516 		 */
2517 		size = PAGE_SIZE << order;
2518 		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, -size);
2519 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, size);
2520 		return true;
2521 	}
2522 
2523 	slab = page_slab(page);
2524 	s = slab->slab_cache;
2525 
2526 	/*
2527 	 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency
2528 	 * of slab_obj_exts being allocated from the same slab and thus the slab
2529 	 * becoming effectively unfreeable.
2530 	 */
2531 	if (is_kmalloc_normal(s))
2532 		return true;
2533 
2534 	/* Ignore already charged objects. */
2535 	obj_exts = slab_obj_exts(slab);
2536 	if (obj_exts) {
2537 		get_slab_obj_exts(obj_exts);
2538 		off = obj_to_index(s, slab, p);
2539 		obj_ext = slab_obj_ext(slab, obj_exts, off);
2540 		if (unlikely(obj_ext->objcg)) {
2541 			put_slab_obj_exts(obj_exts);
2542 			return true;
2543 		}
2544 		put_slab_obj_exts(obj_exts);
2545 	}
2546 
2547 	return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
2548 }
2549 
2550 #else /* CONFIG_MEMCG */
2551 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
2552 					      struct list_lru *lru,
2553 					      gfp_t flags, size_t size,
2554 					      void **p)
2555 {
2556 	return true;
2557 }
2558 
2559 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2560 					void **p, int objects)
2561 {
2562 }
2563 
2564 static inline bool memcg_slab_post_charge(void *p, gfp_t flags)
2565 {
2566 	return true;
2567 }
2568 #endif /* CONFIG_MEMCG */
2569 
2570 #ifdef CONFIG_SLUB_RCU_DEBUG
2571 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
2572 
2573 struct rcu_delayed_free {
2574 	struct rcu_head head;
2575 	void *object;
2576 };
2577 #endif
2578 
2579 /*
2580  * Hooks for other subsystems that check memory allocations. In a typical
2581  * production configuration these hooks all should produce no code at all.
2582  *
2583  * Returns true if freeing of the object can proceed, false if its reuse
2584  * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
2585  * to KFENCE.
2586  *
2587  * For objects allocated via kmalloc_nolock(), only a subset of alloc hooks
2588  * are invoked, so some free hooks must handle asymmetric hook calls.
2589  *
2590  * Alloc hooks called for kmalloc_nolock():
2591  * - kmsan_slab_alloc()
2592  * - kasan_slab_alloc()
2593  * - memcg_slab_post_alloc_hook()
2594  * - alloc_tagging_slab_alloc_hook()
2595  *
2596  * Free hooks that must handle missing corresponding alloc hooks:
2597  * - kmemleak_free_recursive()
2598  * - kfence_free()
2599  *
2600  * Free hooks that have no alloc hook counterpart, and thus safe to call:
2601  * - debug_check_no_locks_freed()
2602  * - debug_check_no_obj_freed()
2603  * - __kcsan_check_access()
2604  */
2605 static __always_inline
2606 bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
2607 		    bool after_rcu_delay)
2608 {
2609 	/* Are the object contents still accessible? */
2610 	bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
2611 
2612 	kmemleak_free_recursive(x, s->flags);
2613 	kmsan_slab_free(s, x);
2614 
2615 	debug_check_no_locks_freed(x, s->object_size);
2616 
2617 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
2618 		debug_check_no_obj_freed(x, s->object_size);
2619 
2620 	/* Use KCSAN to help debug racy use-after-free. */
2621 	if (!still_accessible)
2622 		__kcsan_check_access(x, s->object_size,
2623 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
2624 
2625 	if (kfence_free(x))
2626 		return false;
2627 
2628 	/*
2629 	 * Give KASAN a chance to notice an invalid free operation before we
2630 	 * modify the object.
2631 	 */
2632 	if (kasan_slab_pre_free(s, x))
2633 		return false;
2634 
2635 #ifdef CONFIG_SLUB_RCU_DEBUG
2636 	if (still_accessible) {
2637 		struct rcu_delayed_free *delayed_free;
2638 
2639 		delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT);
2640 		if (delayed_free) {
2641 			/*
2642 			 * Let KASAN track our call stack as a "related work
2643 			 * creation", just like if the object had been freed
2644 			 * normally via kfree_rcu().
2645 			 * We have to do this manually because the rcu_head is
2646 			 * not located inside the object.
2647 			 */
2648 			kasan_record_aux_stack(x);
2649 
2650 			delayed_free->object = x;
2651 			call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
2652 			return false;
2653 		}
2654 	}
2655 #endif /* CONFIG_SLUB_RCU_DEBUG */
2656 
2657 	/*
2658 	 * As memory initialization might be integrated into KASAN,
2659 	 * kasan_slab_free and initialization memset's must be
2660 	 * kept together to avoid discrepancies in behavior.
2661 	 *
2662 	 * The initialization memset's clear the object and the metadata,
2663 	 * but don't touch the SLAB redzone.
2664 	 *
2665 	 * The object's freepointer is also avoided if stored outside the
2666 	 * object.
2667 	 */
2668 	if (unlikely(init)) {
2669 		int rsize;
2670 		unsigned int inuse, orig_size;
2671 
2672 		inuse = get_info_end(s);
2673 		orig_size = get_orig_size(s, x);
2674 		if (!kasan_has_integrated_init())
2675 			memset(kasan_reset_tag(x), 0, orig_size);
2676 		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2677 		memset((char *)kasan_reset_tag(x) + inuse, 0,
2678 		       s->size - inuse - rsize);
2679 		/*
2680 		 * Restore orig_size, otherwise kmalloc redzone overwritten
2681 		 * would be reported
2682 		 */
2683 		set_orig_size(s, x, orig_size);
2684 
2685 	}
2686 	/* KASAN might put x into memory quarantine, delaying its reuse. */
2687 	return !kasan_slab_free(s, x, init, still_accessible, false);
2688 }
2689 
2690 static __fastpath_inline
2691 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
2692 			     int *cnt)
2693 {
2694 
2695 	void *object;
2696 	void *next = *head;
2697 	void *old_tail = *tail;
2698 	bool init;
2699 
2700 	if (is_kfence_address(next)) {
2701 		slab_free_hook(s, next, false, false);
2702 		return false;
2703 	}
2704 
2705 	/* Head and tail of the reconstructed freelist */
2706 	*head = NULL;
2707 	*tail = NULL;
2708 
2709 	init = slab_want_init_on_free(s);
2710 
2711 	do {
2712 		object = next;
2713 		next = get_freepointer(s, object);
2714 
2715 		/* If object's reuse doesn't have to be delayed */
2716 		if (likely(slab_free_hook(s, object, init, false))) {
2717 			/* Move object to the new freelist */
2718 			set_freepointer(s, object, *head);
2719 			*head = object;
2720 			if (!*tail)
2721 				*tail = object;
2722 		} else {
2723 			/*
2724 			 * Adjust the reconstructed freelist depth
2725 			 * accordingly if object's reuse is delayed.
2726 			 */
2727 			--(*cnt);
2728 		}
2729 	} while (object != old_tail);
2730 
2731 	return *head != NULL;
2732 }
2733 
2734 static void *setup_object(struct kmem_cache *s, void *object)
2735 {
2736 	setup_object_debug(s, object);
2737 	object = kasan_init_slab_obj(s, object);
2738 	if (unlikely(s->ctor)) {
2739 		kasan_unpoison_new_object(s, object);
2740 		s->ctor(object);
2741 		kasan_poison_new_object(s, object);
2742 	}
2743 	return object;
2744 }
2745 
2746 static struct slab_sheaf *__alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp,
2747 					      unsigned int capacity)
2748 {
2749 	struct slab_sheaf *sheaf;
2750 	size_t sheaf_size;
2751 
2752 	if (gfp & __GFP_NO_OBJ_EXT)
2753 		return NULL;
2754 
2755 	gfp &= ~OBJCGS_CLEAR_MASK;
2756 
2757 	/*
2758 	 * Prevent recursion to the same cache, or a deep stack of kmallocs of
2759 	 * varying sizes (sheaf capacity might differ for each kmalloc size
2760 	 * bucket)
2761 	 */
2762 	if (s->flags & SLAB_KMALLOC)
2763 		gfp |= __GFP_NO_OBJ_EXT;
2764 
2765 	sheaf_size = struct_size(sheaf, objects, capacity);
2766 	sheaf = kzalloc(sheaf_size, gfp);
2767 
2768 	if (unlikely(!sheaf))
2769 		return NULL;
2770 
2771 	sheaf->cache = s;
2772 
2773 	stat(s, SHEAF_ALLOC);
2774 
2775 	return sheaf;
2776 }
2777 
2778 static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
2779 						   gfp_t gfp)
2780 {
2781 	return __alloc_empty_sheaf(s, gfp, s->sheaf_capacity);
2782 }
2783 
2784 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
2785 {
2786 	kfree(sheaf);
2787 
2788 	stat(s, SHEAF_FREE);
2789 }
2790 
2791 static unsigned int
2792 refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
2793 	       unsigned int max);
2794 
2795 static int refill_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf,
2796 			 gfp_t gfp)
2797 {
2798 	int to_fill = s->sheaf_capacity - sheaf->size;
2799 	int filled;
2800 
2801 	if (!to_fill)
2802 		return 0;
2803 
2804 	filled = refill_objects(s, &sheaf->objects[sheaf->size], gfp, to_fill,
2805 				to_fill);
2806 
2807 	sheaf->size += filled;
2808 
2809 	stat_add(s, SHEAF_REFILL, filled);
2810 
2811 	if (filled < to_fill)
2812 		return -ENOMEM;
2813 
2814 	return 0;
2815 }
2816 
2817 
2818 static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
2819 {
2820 	struct slab_sheaf *sheaf = alloc_empty_sheaf(s, gfp);
2821 
2822 	if (!sheaf)
2823 		return NULL;
2824 
2825 	if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
2826 		free_empty_sheaf(s, sheaf);
2827 		return NULL;
2828 	}
2829 
2830 	return sheaf;
2831 }
2832 
2833 /*
2834  * Maximum number of objects freed during a single flush of main pcs sheaf.
2835  * Translates directly to an on-stack array size.
2836  */
2837 #define PCS_BATCH_MAX	32U
2838 
2839 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
2840 
2841 /*
2842  * Free all objects from the main sheaf. In order to perform
2843  * __kmem_cache_free_bulk() outside of cpu_sheaves->lock, work in batches where
2844  * object pointers are moved to a on-stack array under the lock. To bound the
2845  * stack usage, limit each batch to PCS_BATCH_MAX.
2846  *
2847  * returns true if at least partially flushed
2848  */
2849 static bool sheaf_flush_main(struct kmem_cache *s)
2850 {
2851 	struct slub_percpu_sheaves *pcs;
2852 	unsigned int batch, remaining;
2853 	void *objects[PCS_BATCH_MAX];
2854 	struct slab_sheaf *sheaf;
2855 	bool ret = false;
2856 
2857 next_batch:
2858 	if (!local_trylock(&s->cpu_sheaves->lock))
2859 		return ret;
2860 
2861 	pcs = this_cpu_ptr(s->cpu_sheaves);
2862 	sheaf = pcs->main;
2863 
2864 	batch = min(PCS_BATCH_MAX, sheaf->size);
2865 
2866 	sheaf->size -= batch;
2867 	memcpy(objects, sheaf->objects + sheaf->size, batch * sizeof(void *));
2868 
2869 	remaining = sheaf->size;
2870 
2871 	local_unlock(&s->cpu_sheaves->lock);
2872 
2873 	__kmem_cache_free_bulk(s, batch, &objects[0]);
2874 
2875 	stat_add(s, SHEAF_FLUSH, batch);
2876 
2877 	ret = true;
2878 
2879 	if (remaining)
2880 		goto next_batch;
2881 
2882 	return ret;
2883 }
2884 
2885 /*
2886  * Free all objects from a sheaf that's unused, i.e. not linked to any
2887  * cpu_sheaves, so we need no locking and batching. The locking is also not
2888  * necessary when flushing cpu's sheaves (both spare and main) during cpu
2889  * hotremove as the cpu is not executing anymore.
2890  */
2891 static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf)
2892 {
2893 	if (!sheaf->size)
2894 		return;
2895 
2896 	stat_add(s, SHEAF_FLUSH, sheaf->size);
2897 
2898 	__kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
2899 
2900 	sheaf->size = 0;
2901 }
2902 
2903 static bool __rcu_free_sheaf_prepare(struct kmem_cache *s,
2904 				     struct slab_sheaf *sheaf)
2905 {
2906 	bool init = slab_want_init_on_free(s);
2907 	void **p = &sheaf->objects[0];
2908 	unsigned int i = 0;
2909 	bool pfmemalloc = false;
2910 
2911 	while (i < sheaf->size) {
2912 		struct slab *slab = virt_to_slab(p[i]);
2913 
2914 		memcg_slab_free_hook(s, slab, p + i, 1);
2915 		alloc_tagging_slab_free_hook(s, slab, p + i, 1);
2916 
2917 		if (unlikely(!slab_free_hook(s, p[i], init, true))) {
2918 			p[i] = p[--sheaf->size];
2919 			continue;
2920 		}
2921 
2922 		if (slab_test_pfmemalloc(slab))
2923 			pfmemalloc = true;
2924 
2925 		i++;
2926 	}
2927 
2928 	return pfmemalloc;
2929 }
2930 
2931 static void rcu_free_sheaf_nobarn(struct rcu_head *head)
2932 {
2933 	struct slab_sheaf *sheaf;
2934 	struct kmem_cache *s;
2935 
2936 	sheaf = container_of(head, struct slab_sheaf, rcu_head);
2937 	s = sheaf->cache;
2938 
2939 	__rcu_free_sheaf_prepare(s, sheaf);
2940 
2941 	sheaf_flush_unused(s, sheaf);
2942 
2943 	free_empty_sheaf(s, sheaf);
2944 }
2945 
2946 /*
2947  * Caller needs to make sure migration is disabled in order to fully flush
2948  * single cpu's sheaves
2949  *
2950  * must not be called from an irq
2951  *
2952  * flushing operations are rare so let's keep it simple and flush to slabs
2953  * directly, skipping the barn
2954  */
2955 static void pcs_flush_all(struct kmem_cache *s)
2956 {
2957 	struct slub_percpu_sheaves *pcs;
2958 	struct slab_sheaf *spare, *rcu_free;
2959 
2960 	local_lock(&s->cpu_sheaves->lock);
2961 	pcs = this_cpu_ptr(s->cpu_sheaves);
2962 
2963 	spare = pcs->spare;
2964 	pcs->spare = NULL;
2965 
2966 	rcu_free = pcs->rcu_free;
2967 	pcs->rcu_free = NULL;
2968 
2969 	local_unlock(&s->cpu_sheaves->lock);
2970 
2971 	if (spare) {
2972 		sheaf_flush_unused(s, spare);
2973 		free_empty_sheaf(s, spare);
2974 	}
2975 
2976 	if (rcu_free)
2977 		call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
2978 
2979 	sheaf_flush_main(s);
2980 }
2981 
2982 static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu)
2983 {
2984 	struct slub_percpu_sheaves *pcs;
2985 
2986 	pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
2987 
2988 	/* The cpu is not executing anymore so we don't need pcs->lock */
2989 	sheaf_flush_unused(s, pcs->main);
2990 	if (pcs->spare) {
2991 		sheaf_flush_unused(s, pcs->spare);
2992 		free_empty_sheaf(s, pcs->spare);
2993 		pcs->spare = NULL;
2994 	}
2995 
2996 	if (pcs->rcu_free) {
2997 		call_rcu(&pcs->rcu_free->rcu_head, rcu_free_sheaf_nobarn);
2998 		pcs->rcu_free = NULL;
2999 	}
3000 }
3001 
3002 static void pcs_destroy(struct kmem_cache *s)
3003 {
3004 	int cpu;
3005 
3006 	/*
3007 	 * We may be unwinding cache creation that failed before or during the
3008 	 * allocation of this.
3009 	 */
3010 	if (!s->cpu_sheaves)
3011 		return;
3012 
3013 	/* pcs->main can only point to the bootstrap sheaf, nothing to free */
3014 	if (!cache_has_sheaves(s))
3015 		goto free_pcs;
3016 
3017 	for_each_possible_cpu(cpu) {
3018 		struct slub_percpu_sheaves *pcs;
3019 
3020 		pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3021 
3022 		/* This can happen when unwinding failed cache creation. */
3023 		if (!pcs->main)
3024 			continue;
3025 
3026 		/*
3027 		 * We have already passed __kmem_cache_shutdown() so everything
3028 		 * was flushed and there should be no objects allocated from
3029 		 * slabs, otherwise kmem_cache_destroy() would have aborted.
3030 		 * Therefore something would have to be really wrong if the
3031 		 * warnings here trigger, and we should rather leave objects and
3032 		 * sheaves to leak in that case.
3033 		 */
3034 
3035 		WARN_ON(pcs->spare);
3036 		WARN_ON(pcs->rcu_free);
3037 
3038 		if (!WARN_ON(pcs->main->size)) {
3039 			free_empty_sheaf(s, pcs->main);
3040 			pcs->main = NULL;
3041 		}
3042 	}
3043 
3044 free_pcs:
3045 	free_percpu(s->cpu_sheaves);
3046 	s->cpu_sheaves = NULL;
3047 }
3048 
3049 static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn,
3050 					       bool allow_spin)
3051 {
3052 	struct slab_sheaf *empty = NULL;
3053 	unsigned long flags;
3054 
3055 	if (!data_race(barn->nr_empty))
3056 		return NULL;
3057 
3058 	if (likely(allow_spin))
3059 		spin_lock_irqsave(&barn->lock, flags);
3060 	else if (!spin_trylock_irqsave(&barn->lock, flags))
3061 		return NULL;
3062 
3063 	if (likely(barn->nr_empty)) {
3064 		empty = list_first_entry(&barn->sheaves_empty,
3065 					 struct slab_sheaf, barn_list);
3066 		list_del(&empty->barn_list);
3067 		barn->nr_empty--;
3068 	}
3069 
3070 	spin_unlock_irqrestore(&barn->lock, flags);
3071 
3072 	return empty;
3073 }
3074 
3075 /*
3076  * The following two functions are used mainly in cases where we have to undo an
3077  * intended action due to a race or cpu migration. Thus they do not check the
3078  * empty or full sheaf limits for simplicity.
3079  */
3080 
3081 static void barn_put_empty_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
3082 {
3083 	unsigned long flags;
3084 
3085 	spin_lock_irqsave(&barn->lock, flags);
3086 
3087 	list_add(&sheaf->barn_list, &barn->sheaves_empty);
3088 	barn->nr_empty++;
3089 
3090 	spin_unlock_irqrestore(&barn->lock, flags);
3091 }
3092 
3093 static void barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
3094 {
3095 	unsigned long flags;
3096 
3097 	spin_lock_irqsave(&barn->lock, flags);
3098 
3099 	list_add(&sheaf->barn_list, &barn->sheaves_full);
3100 	barn->nr_full++;
3101 
3102 	spin_unlock_irqrestore(&barn->lock, flags);
3103 }
3104 
3105 static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
3106 {
3107 	struct slab_sheaf *sheaf = NULL;
3108 	unsigned long flags;
3109 
3110 	if (!data_race(barn->nr_full) && !data_race(barn->nr_empty))
3111 		return NULL;
3112 
3113 	spin_lock_irqsave(&barn->lock, flags);
3114 
3115 	if (barn->nr_full) {
3116 		sheaf = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
3117 					barn_list);
3118 		list_del(&sheaf->barn_list);
3119 		barn->nr_full--;
3120 	} else if (barn->nr_empty) {
3121 		sheaf = list_first_entry(&barn->sheaves_empty,
3122 					 struct slab_sheaf, barn_list);
3123 		list_del(&sheaf->barn_list);
3124 		barn->nr_empty--;
3125 	}
3126 
3127 	spin_unlock_irqrestore(&barn->lock, flags);
3128 
3129 	return sheaf;
3130 }
3131 
3132 /*
3133  * If a full sheaf is available, return it and put the supplied empty one to
3134  * barn. We ignore the limit on empty sheaves as the number of sheaves doesn't
3135  * change.
3136  */
3137 static struct slab_sheaf *
3138 barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty,
3139 			 bool allow_spin)
3140 {
3141 	struct slab_sheaf *full = NULL;
3142 	unsigned long flags;
3143 
3144 	if (!data_race(barn->nr_full))
3145 		return NULL;
3146 
3147 	if (likely(allow_spin))
3148 		spin_lock_irqsave(&barn->lock, flags);
3149 	else if (!spin_trylock_irqsave(&barn->lock, flags))
3150 		return NULL;
3151 
3152 	if (likely(barn->nr_full)) {
3153 		full = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
3154 					barn_list);
3155 		list_del(&full->barn_list);
3156 		list_add(&empty->barn_list, &barn->sheaves_empty);
3157 		barn->nr_full--;
3158 		barn->nr_empty++;
3159 	}
3160 
3161 	spin_unlock_irqrestore(&barn->lock, flags);
3162 
3163 	return full;
3164 }
3165 
3166 /*
3167  * If an empty sheaf is available, return it and put the supplied full one to
3168  * barn. But if there are too many full sheaves, reject this with -E2BIG.
3169  */
3170 static struct slab_sheaf *
3171 barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full,
3172 			bool allow_spin)
3173 {
3174 	struct slab_sheaf *empty;
3175 	unsigned long flags;
3176 
3177 	/* we don't repeat this check under barn->lock as it's not critical */
3178 	if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES)
3179 		return ERR_PTR(-E2BIG);
3180 	if (!data_race(barn->nr_empty))
3181 		return ERR_PTR(-ENOMEM);
3182 
3183 	if (likely(allow_spin))
3184 		spin_lock_irqsave(&barn->lock, flags);
3185 	else if (!spin_trylock_irqsave(&barn->lock, flags))
3186 		return ERR_PTR(-EBUSY);
3187 
3188 	if (likely(barn->nr_empty)) {
3189 		empty = list_first_entry(&barn->sheaves_empty, struct slab_sheaf,
3190 					 barn_list);
3191 		list_del(&empty->barn_list);
3192 		list_add(&full->barn_list, &barn->sheaves_full);
3193 		barn->nr_empty--;
3194 		barn->nr_full++;
3195 	} else {
3196 		empty = ERR_PTR(-ENOMEM);
3197 	}
3198 
3199 	spin_unlock_irqrestore(&barn->lock, flags);
3200 
3201 	return empty;
3202 }
3203 
3204 static void barn_init(struct node_barn *barn)
3205 {
3206 	spin_lock_init(&barn->lock);
3207 	INIT_LIST_HEAD(&barn->sheaves_full);
3208 	INIT_LIST_HEAD(&barn->sheaves_empty);
3209 	barn->nr_full = 0;
3210 	barn->nr_empty = 0;
3211 }
3212 
3213 static void barn_shrink(struct kmem_cache *s, struct node_barn *barn)
3214 {
3215 	LIST_HEAD(empty_list);
3216 	LIST_HEAD(full_list);
3217 	struct slab_sheaf *sheaf, *sheaf2;
3218 	unsigned long flags;
3219 
3220 	spin_lock_irqsave(&barn->lock, flags);
3221 
3222 	list_splice_init(&barn->sheaves_full, &full_list);
3223 	barn->nr_full = 0;
3224 	list_splice_init(&barn->sheaves_empty, &empty_list);
3225 	barn->nr_empty = 0;
3226 
3227 	spin_unlock_irqrestore(&barn->lock, flags);
3228 
3229 	list_for_each_entry_safe(sheaf, sheaf2, &full_list, barn_list) {
3230 		sheaf_flush_unused(s, sheaf);
3231 		free_empty_sheaf(s, sheaf);
3232 	}
3233 
3234 	list_for_each_entry_safe(sheaf, sheaf2, &empty_list, barn_list)
3235 		free_empty_sheaf(s, sheaf);
3236 }
3237 
3238 /*
3239  * Slab allocation and freeing
3240  */
3241 static inline struct slab *alloc_slab_page(gfp_t flags, int node,
3242 					   struct kmem_cache_order_objects oo,
3243 					   bool allow_spin)
3244 {
3245 	struct page *page;
3246 	struct slab *slab;
3247 	unsigned int order = oo_order(oo);
3248 
3249 	if (unlikely(!allow_spin))
3250 		page = alloc_frozen_pages_nolock(0/* __GFP_COMP is implied */,
3251 								  node, order);
3252 	else if (node == NUMA_NO_NODE)
3253 		page = alloc_frozen_pages(flags, order);
3254 	else
3255 		page = __alloc_frozen_pages(flags, order, node, NULL);
3256 
3257 	if (!page)
3258 		return NULL;
3259 
3260 	__SetPageSlab(page);
3261 	slab = page_slab(page);
3262 	if (page_is_pfmemalloc(page))
3263 		slab_set_pfmemalloc(slab);
3264 
3265 	return slab;
3266 }
3267 
3268 #ifdef CONFIG_SLAB_FREELIST_RANDOM
3269 /* Pre-initialize the random sequence cache */
3270 static int init_cache_random_seq(struct kmem_cache *s)
3271 {
3272 	unsigned int count = oo_objects(s->oo);
3273 	int err;
3274 
3275 	/* Bailout if already initialised */
3276 	if (s->random_seq)
3277 		return 0;
3278 
3279 	err = cache_random_seq_create(s, count, GFP_KERNEL);
3280 	if (err) {
3281 		pr_err("SLUB: Unable to initialize free list for %s\n",
3282 			s->name);
3283 		return err;
3284 	}
3285 
3286 	/* Transform to an offset on the set of pages */
3287 	if (s->random_seq) {
3288 		unsigned int i;
3289 
3290 		for (i = 0; i < count; i++)
3291 			s->random_seq[i] *= s->size;
3292 	}
3293 	return 0;
3294 }
3295 
3296 /* Initialize each random sequence freelist per cache */
3297 static void __init init_freelist_randomization(void)
3298 {
3299 	struct kmem_cache *s;
3300 
3301 	mutex_lock(&slab_mutex);
3302 
3303 	list_for_each_entry(s, &slab_caches, list)
3304 		init_cache_random_seq(s);
3305 
3306 	mutex_unlock(&slab_mutex);
3307 }
3308 
3309 /* Get the next entry on the pre-computed freelist randomized */
3310 static void *next_freelist_entry(struct kmem_cache *s,
3311 				unsigned long *pos, void *start,
3312 				unsigned long page_limit,
3313 				unsigned long freelist_count)
3314 {
3315 	unsigned int idx;
3316 
3317 	/*
3318 	 * If the target page allocation failed, the number of objects on the
3319 	 * page might be smaller than the usual size defined by the cache.
3320 	 */
3321 	do {
3322 		idx = s->random_seq[*pos];
3323 		*pos += 1;
3324 		if (*pos >= freelist_count)
3325 			*pos = 0;
3326 	} while (unlikely(idx >= page_limit));
3327 
3328 	return (char *)start + idx;
3329 }
3330 
3331 static DEFINE_PER_CPU(struct rnd_state, slab_rnd_state);
3332 
3333 /* Shuffle the single linked freelist based on a random pre-computed sequence */
3334 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
3335 			     bool allow_spin)
3336 {
3337 	void *start;
3338 	void *cur;
3339 	void *next;
3340 	unsigned long idx, pos, page_limit, freelist_count;
3341 
3342 	if (slab->objects < 2 || !s->random_seq)
3343 		return false;
3344 
3345 	freelist_count = oo_objects(s->oo);
3346 	if (allow_spin) {
3347 		pos = get_random_u32_below(freelist_count);
3348 	} else {
3349 		struct rnd_state *state;
3350 
3351 		/*
3352 		 * An interrupt or NMI handler might interrupt and change
3353 		 * the state in the middle, but that's safe.
3354 		 */
3355 		state = &get_cpu_var(slab_rnd_state);
3356 		pos = prandom_u32_state(state) % freelist_count;
3357 		put_cpu_var(slab_rnd_state);
3358 	}
3359 
3360 	page_limit = slab->objects * s->size;
3361 	start = fixup_red_left(s, slab_address(slab));
3362 
3363 	/* First entry is used as the base of the freelist */
3364 	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
3365 	cur = setup_object(s, cur);
3366 	slab->freelist = cur;
3367 
3368 	for (idx = 1; idx < slab->objects; idx++) {
3369 		next = next_freelist_entry(s, &pos, start, page_limit,
3370 			freelist_count);
3371 		next = setup_object(s, next);
3372 		set_freepointer(s, cur, next);
3373 		cur = next;
3374 	}
3375 	set_freepointer(s, cur, NULL);
3376 
3377 	return true;
3378 }
3379 #else
3380 static inline int init_cache_random_seq(struct kmem_cache *s)
3381 {
3382 	return 0;
3383 }
3384 static inline void init_freelist_randomization(void) { }
3385 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
3386 				    bool allow_spin)
3387 {
3388 	return false;
3389 }
3390 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
3391 
3392 static __always_inline void account_slab(struct slab *slab, int order,
3393 					 struct kmem_cache *s, gfp_t gfp)
3394 {
3395 	if (memcg_kmem_online() &&
3396 			(s->flags & SLAB_ACCOUNT) &&
3397 			!slab_obj_exts(slab))
3398 		alloc_slab_obj_exts(slab, s, gfp, true);
3399 
3400 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
3401 			    PAGE_SIZE << order);
3402 }
3403 
3404 static __always_inline void unaccount_slab(struct slab *slab, int order,
3405 					   struct kmem_cache *s, bool allow_spin)
3406 {
3407 	/*
3408 	 * The slab object extensions should now be freed regardless of
3409 	 * whether mem_alloc_profiling_enabled() or not because profiling
3410 	 * might have been disabled after slab->obj_exts got allocated.
3411 	 */
3412 	free_slab_obj_exts(slab, allow_spin);
3413 
3414 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
3415 			    -(PAGE_SIZE << order));
3416 }
3417 
3418 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
3419 {
3420 	bool allow_spin = gfpflags_allow_spinning(flags);
3421 	struct slab *slab;
3422 	struct kmem_cache_order_objects oo = s->oo;
3423 	gfp_t alloc_gfp;
3424 	void *start, *p, *next;
3425 	int idx;
3426 	bool shuffle;
3427 
3428 	flags &= gfp_allowed_mask;
3429 
3430 	flags |= s->allocflags;
3431 
3432 	/*
3433 	 * Let the initial higher-order allocation fail under memory pressure
3434 	 * so we fall-back to the minimum order allocation.
3435 	 */
3436 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
3437 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
3438 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
3439 
3440 	/*
3441 	 * __GFP_RECLAIM could be cleared on the first allocation attempt,
3442 	 * so pass allow_spin flag directly.
3443 	 */
3444 	slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
3445 	if (unlikely(!slab)) {
3446 		oo = s->min;
3447 		alloc_gfp = flags;
3448 		/*
3449 		 * Allocation may have failed due to fragmentation.
3450 		 * Try a lower order alloc if possible
3451 		 */
3452 		slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
3453 		if (unlikely(!slab))
3454 			return NULL;
3455 		stat(s, ORDER_FALLBACK);
3456 	}
3457 
3458 	slab->objects = oo_objects(oo);
3459 	slab->inuse = 0;
3460 	slab->frozen = 0;
3461 
3462 	slab->slab_cache = s;
3463 
3464 	kasan_poison_slab(slab);
3465 
3466 	start = slab_address(slab);
3467 
3468 	setup_slab_debug(s, slab, start);
3469 	init_slab_obj_exts(slab);
3470 	/*
3471 	 * Poison the slab before initializing the slabobj_ext array
3472 	 * to prevent the array from being overwritten.
3473 	 */
3474 	alloc_slab_obj_exts_early(s, slab);
3475 	account_slab(slab, oo_order(oo), s, flags);
3476 
3477 	shuffle = shuffle_freelist(s, slab, allow_spin);
3478 
3479 	if (!shuffle) {
3480 		start = fixup_red_left(s, start);
3481 		start = setup_object(s, start);
3482 		slab->freelist = start;
3483 		for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
3484 			next = p + s->size;
3485 			next = setup_object(s, next);
3486 			set_freepointer(s, p, next);
3487 			p = next;
3488 		}
3489 		set_freepointer(s, p, NULL);
3490 	}
3491 
3492 	return slab;
3493 }
3494 
3495 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
3496 {
3497 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
3498 		flags = kmalloc_fix_flags(flags);
3499 
3500 	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
3501 
3502 	return allocate_slab(s,
3503 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
3504 }
3505 
3506 static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin)
3507 {
3508 	struct page *page = slab_page(slab);
3509 	int order = compound_order(page);
3510 	int pages = 1 << order;
3511 
3512 	__slab_clear_pfmemalloc(slab);
3513 	page->mapping = NULL;
3514 	__ClearPageSlab(page);
3515 	mm_account_reclaimed_pages(pages);
3516 	unaccount_slab(slab, order, s, allow_spin);
3517 	if (allow_spin)
3518 		free_frozen_pages(page, order);
3519 	else
3520 		free_frozen_pages_nolock(page, order);
3521 }
3522 
3523 static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
3524 {
3525 	/*
3526 	 * Since it was just allocated, we can skip the actions in
3527 	 * discard_slab() and free_slab().
3528 	 */
3529 	__free_slab(s, slab, false);
3530 }
3531 
3532 static void rcu_free_slab(struct rcu_head *h)
3533 {
3534 	struct slab *slab = container_of(h, struct slab, rcu_head);
3535 
3536 	__free_slab(slab->slab_cache, slab, true);
3537 }
3538 
3539 static void free_slab(struct kmem_cache *s, struct slab *slab)
3540 {
3541 	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
3542 		void *p;
3543 
3544 		slab_pad_check(s, slab);
3545 		for_each_object(p, s, slab_address(slab), slab->objects)
3546 			check_object(s, slab, p, SLUB_RED_INACTIVE);
3547 	}
3548 
3549 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
3550 		call_rcu(&slab->rcu_head, rcu_free_slab);
3551 	else
3552 		__free_slab(s, slab, true);
3553 }
3554 
3555 static void discard_slab(struct kmem_cache *s, struct slab *slab)
3556 {
3557 	dec_slabs_node(s, slab_nid(slab), slab->objects);
3558 	free_slab(s, slab);
3559 }
3560 
3561 static inline bool slab_test_node_partial(const struct slab *slab)
3562 {
3563 	return test_bit(SL_partial, &slab->flags.f);
3564 }
3565 
3566 static inline void slab_set_node_partial(struct slab *slab)
3567 {
3568 	set_bit(SL_partial, &slab->flags.f);
3569 }
3570 
3571 static inline void slab_clear_node_partial(struct slab *slab)
3572 {
3573 	clear_bit(SL_partial, &slab->flags.f);
3574 }
3575 
3576 /*
3577  * Management of partially allocated slabs.
3578  */
3579 static inline void
3580 __add_partial(struct kmem_cache_node *n, struct slab *slab, enum add_mode mode)
3581 {
3582 	n->nr_partial++;
3583 	if (mode == ADD_TO_TAIL)
3584 		list_add_tail(&slab->slab_list, &n->partial);
3585 	else
3586 		list_add(&slab->slab_list, &n->partial);
3587 	slab_set_node_partial(slab);
3588 }
3589 
3590 static inline void add_partial(struct kmem_cache_node *n,
3591 				struct slab *slab, enum add_mode mode)
3592 {
3593 	lockdep_assert_held(&n->list_lock);
3594 	__add_partial(n, slab, mode);
3595 }
3596 
3597 static inline void remove_partial(struct kmem_cache_node *n,
3598 					struct slab *slab)
3599 {
3600 	lockdep_assert_held(&n->list_lock);
3601 	list_del(&slab->slab_list);
3602 	slab_clear_node_partial(slab);
3603 	n->nr_partial--;
3604 }
3605 
3606 /*
3607  * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
3608  * slab from the n->partial list. Remove only a single object from the slab, do
3609  * the alloc_debug_processing() checks and leave the slab on the list, or move
3610  * it to full list if it was the last free object.
3611  */
3612 static void *alloc_single_from_partial(struct kmem_cache *s,
3613 		struct kmem_cache_node *n, struct slab *slab, int orig_size)
3614 {
3615 	void *object;
3616 
3617 	lockdep_assert_held(&n->list_lock);
3618 
3619 #ifdef CONFIG_SLUB_DEBUG
3620 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3621 		if (!validate_slab_ptr(slab)) {
3622 			slab_err(s, slab, "Not a valid slab page");
3623 			return NULL;
3624 		}
3625 	}
3626 #endif
3627 
3628 	object = slab->freelist;
3629 	slab->freelist = get_freepointer(s, object);
3630 	slab->inuse++;
3631 
3632 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
3633 		remove_partial(n, slab);
3634 		return NULL;
3635 	}
3636 
3637 	if (slab->inuse == slab->objects) {
3638 		remove_partial(n, slab);
3639 		add_full(s, n, slab);
3640 	}
3641 
3642 	return object;
3643 }
3644 
3645 /*
3646  * Called only for kmem_cache_debug() caches to allocate from a freshly
3647  * allocated slab. Allocate a single object instead of whole freelist
3648  * and put the slab to the partial (or full) list.
3649  */
3650 static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
3651 					int orig_size, gfp_t gfpflags)
3652 {
3653 	bool allow_spin = gfpflags_allow_spinning(gfpflags);
3654 	int nid = slab_nid(slab);
3655 	struct kmem_cache_node *n = get_node(s, nid);
3656 	unsigned long flags;
3657 	void *object;
3658 
3659 	if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
3660 		/* Unlucky, discard newly allocated slab. */
3661 		free_new_slab_nolock(s, slab);
3662 		return NULL;
3663 	}
3664 
3665 	object = slab->freelist;
3666 	slab->freelist = get_freepointer(s, object);
3667 	slab->inuse = 1;
3668 
3669 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
3670 		/*
3671 		 * It's not really expected that this would fail on a
3672 		 * freshly allocated slab, but a concurrent memory
3673 		 * corruption in theory could cause that.
3674 		 * Leak memory of allocated slab.
3675 		 */
3676 		if (!allow_spin)
3677 			spin_unlock_irqrestore(&n->list_lock, flags);
3678 		return NULL;
3679 	}
3680 
3681 	if (allow_spin)
3682 		spin_lock_irqsave(&n->list_lock, flags);
3683 
3684 	if (slab->inuse == slab->objects)
3685 		add_full(s, n, slab);
3686 	else
3687 		add_partial(n, slab, ADD_TO_HEAD);
3688 
3689 	inc_slabs_node(s, nid, slab->objects);
3690 	spin_unlock_irqrestore(&n->list_lock, flags);
3691 
3692 	return object;
3693 }
3694 
3695 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
3696 
3697 static bool get_partial_node_bulk(struct kmem_cache *s,
3698 				  struct kmem_cache_node *n,
3699 				  struct partial_bulk_context *pc,
3700 				  bool allow_spin)
3701 {
3702 	struct slab *slab, *slab2;
3703 	unsigned int total_free = 0;
3704 	unsigned long flags;
3705 
3706 	/* Racy check to avoid taking the lock unnecessarily. */
3707 	if (!n || data_race(!n->nr_partial))
3708 		return false;
3709 
3710 	INIT_LIST_HEAD(&pc->slabs);
3711 
3712 	if (allow_spin)
3713 		spin_lock_irqsave(&n->list_lock, flags);
3714 	else if (!spin_trylock_irqsave(&n->list_lock, flags))
3715 		return false;
3716 
3717 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
3718 		struct freelist_counters flc;
3719 		unsigned int slab_free;
3720 
3721 		if (!pfmemalloc_match(slab, pc->flags))
3722 			continue;
3723 
3724 		/*
3725 		 * determine the number of free objects in the slab racily
3726 		 *
3727 		 * slab_free is a lower bound due to possible subsequent
3728 		 * concurrent freeing, so the caller may get more objects than
3729 		 * requested and must handle that
3730 		 */
3731 		flc.counters = data_race(READ_ONCE(slab->counters));
3732 		slab_free = flc.objects - flc.inuse;
3733 
3734 		/* we have already min and this would get us over the max */
3735 		if (total_free >= pc->min_objects
3736 		    && total_free + slab_free > pc->max_objects)
3737 			break;
3738 
3739 		remove_partial(n, slab);
3740 
3741 		list_add(&slab->slab_list, &pc->slabs);
3742 
3743 		total_free += slab_free;
3744 		if (total_free >= pc->max_objects)
3745 			break;
3746 	}
3747 
3748 	spin_unlock_irqrestore(&n->list_lock, flags);
3749 	return total_free > 0;
3750 }
3751 
3752 /*
3753  * Try to allocate object from a partial slab on a specific node.
3754  */
3755 static void *get_from_partial_node(struct kmem_cache *s,
3756 				   struct kmem_cache_node *n,
3757 				   struct partial_context *pc)
3758 {
3759 	struct slab *slab, *slab2;
3760 	unsigned long flags;
3761 	void *object = NULL;
3762 
3763 	/*
3764 	 * Racy check. If we mistakenly see no partial slabs then we
3765 	 * just allocate an empty slab. If we mistakenly try to get a
3766 	 * partial slab and there is none available then get_from_partial()
3767 	 * will return NULL.
3768 	 */
3769 	if (!n || !n->nr_partial)
3770 		return NULL;
3771 
3772 	if (gfpflags_allow_spinning(pc->flags))
3773 		spin_lock_irqsave(&n->list_lock, flags);
3774 	else if (!spin_trylock_irqsave(&n->list_lock, flags))
3775 		return NULL;
3776 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
3777 
3778 		struct freelist_counters old, new;
3779 
3780 		if (!pfmemalloc_match(slab, pc->flags))
3781 			continue;
3782 
3783 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
3784 			object = alloc_single_from_partial(s, n, slab,
3785 							pc->orig_size);
3786 			if (object)
3787 				break;
3788 			continue;
3789 		}
3790 
3791 		/*
3792 		 * get a single object from the slab. This might race against
3793 		 * __slab_free(), which however has to take the list_lock if
3794 		 * it's about to make the slab fully free.
3795 		 */
3796 		do {
3797 			old.freelist = slab->freelist;
3798 			old.counters = slab->counters;
3799 
3800 			new.freelist = get_freepointer(s, old.freelist);
3801 			new.counters = old.counters;
3802 			new.inuse++;
3803 
3804 		} while (!__slab_update_freelist(s, slab, &old, &new, "get_from_partial_node"));
3805 
3806 		object = old.freelist;
3807 		if (!new.freelist)
3808 			remove_partial(n, slab);
3809 
3810 		break;
3811 	}
3812 	spin_unlock_irqrestore(&n->list_lock, flags);
3813 	return object;
3814 }
3815 
3816 /*
3817  * Get an object from somewhere. Search in increasing NUMA distances.
3818  */
3819 static void *get_from_any_partial(struct kmem_cache *s, struct partial_context *pc)
3820 {
3821 #ifdef CONFIG_NUMA
3822 	struct zonelist *zonelist;
3823 	struct zoneref *z;
3824 	struct zone *zone;
3825 	enum zone_type highest_zoneidx = gfp_zone(pc->flags);
3826 	unsigned int cpuset_mems_cookie;
3827 	bool allow_spin = gfpflags_allow_spinning(pc->flags);
3828 
3829 	/*
3830 	 * The defrag ratio allows a configuration of the tradeoffs between
3831 	 * inter node defragmentation and node local allocations. A lower
3832 	 * defrag_ratio increases the tendency to do local allocations
3833 	 * instead of attempting to obtain partial slabs from other nodes.
3834 	 *
3835 	 * If the defrag_ratio is set to 0 then kmalloc() always
3836 	 * returns node local objects. If the ratio is higher then kmalloc()
3837 	 * may return off node objects because partial slabs are obtained
3838 	 * from other nodes and filled up.
3839 	 *
3840 	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
3841 	 * (which makes defrag_ratio = 1000) then every (well almost)
3842 	 * allocation will first attempt to defrag slab caches on other nodes.
3843 	 * This means scanning over all nodes to look for partial slabs which
3844 	 * may be expensive if we do it every time we are trying to find a slab
3845 	 * with available objects.
3846 	 */
3847 	if (!s->remote_node_defrag_ratio ||
3848 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
3849 		return NULL;
3850 
3851 	do {
3852 		/*
3853 		 * read_mems_allowed_begin() accesses current->mems_allowed_seq,
3854 		 * a seqcount_spinlock_t that is not NMI-safe. Do not access
3855 		 * current->mems_allowed_seq and avoid retry when GFP flags
3856 		 * indicate spinning is not allowed.
3857 		 */
3858 		if (allow_spin)
3859 			cpuset_mems_cookie = read_mems_allowed_begin();
3860 
3861 		zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
3862 		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
3863 			struct kmem_cache_node *n;
3864 
3865 			n = get_node(s, zone_to_nid(zone));
3866 
3867 			if (n && cpuset_zone_allowed(zone, pc->flags) &&
3868 					n->nr_partial > s->min_partial) {
3869 
3870 				void *object = get_from_partial_node(s, n, pc);
3871 
3872 				if (object) {
3873 					/*
3874 					 * Don't check read_mems_allowed_retry()
3875 					 * here - if mems_allowed was updated in
3876 					 * parallel, that was a harmless race
3877 					 * between allocation and the cpuset
3878 					 * update
3879 					 */
3880 					return object;
3881 				}
3882 			}
3883 		}
3884 	} while (allow_spin && read_mems_allowed_retry(cpuset_mems_cookie));
3885 #endif	/* CONFIG_NUMA */
3886 	return NULL;
3887 }
3888 
3889 /*
3890  * Get an object from a partial slab
3891  */
3892 static void *get_from_partial(struct kmem_cache *s, int node,
3893 			      struct partial_context *pc)
3894 {
3895 	int searchnode = node;
3896 	void *object;
3897 
3898 	if (node == NUMA_NO_NODE)
3899 		searchnode = numa_mem_id();
3900 
3901 	object = get_from_partial_node(s, get_node(s, searchnode), pc);
3902 	if (object || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
3903 		return object;
3904 
3905 	return get_from_any_partial(s, pc);
3906 }
3907 
3908 static bool has_pcs_used(int cpu, struct kmem_cache *s)
3909 {
3910 	struct slub_percpu_sheaves *pcs;
3911 
3912 	if (!cache_has_sheaves(s))
3913 		return false;
3914 
3915 	pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3916 
3917 	return (pcs->spare || pcs->rcu_free || pcs->main->size);
3918 }
3919 
3920 /*
3921  * Flush percpu sheaves
3922  *
3923  * Called from CPU work handler with migration disabled.
3924  */
3925 static void flush_cpu_sheaves(struct work_struct *w)
3926 {
3927 	struct kmem_cache *s;
3928 	struct slub_flush_work *sfw;
3929 
3930 	sfw = container_of(w, struct slub_flush_work, work);
3931 
3932 	s = sfw->s;
3933 
3934 	if (cache_has_sheaves(s))
3935 		pcs_flush_all(s);
3936 }
3937 
3938 static void flush_all_cpus_locked(struct kmem_cache *s)
3939 {
3940 	struct slub_flush_work *sfw;
3941 	unsigned int cpu;
3942 
3943 	lockdep_assert_cpus_held();
3944 	mutex_lock(&flush_lock);
3945 
3946 	for_each_online_cpu(cpu) {
3947 		sfw = &per_cpu(slub_flush, cpu);
3948 		if (!has_pcs_used(cpu, s)) {
3949 			sfw->skip = true;
3950 			continue;
3951 		}
3952 		INIT_WORK(&sfw->work, flush_cpu_sheaves);
3953 		sfw->skip = false;
3954 		sfw->s = s;
3955 		queue_work_on(cpu, flushwq, &sfw->work);
3956 	}
3957 
3958 	for_each_online_cpu(cpu) {
3959 		sfw = &per_cpu(slub_flush, cpu);
3960 		if (sfw->skip)
3961 			continue;
3962 		flush_work(&sfw->work);
3963 	}
3964 
3965 	mutex_unlock(&flush_lock);
3966 }
3967 
3968 static void flush_all(struct kmem_cache *s)
3969 {
3970 	cpus_read_lock();
3971 	flush_all_cpus_locked(s);
3972 	cpus_read_unlock();
3973 }
3974 
3975 static void flush_rcu_sheaf(struct work_struct *w)
3976 {
3977 	struct slub_percpu_sheaves *pcs;
3978 	struct slab_sheaf *rcu_free;
3979 	struct slub_flush_work *sfw;
3980 	struct kmem_cache *s;
3981 
3982 	sfw = container_of(w, struct slub_flush_work, work);
3983 	s = sfw->s;
3984 
3985 	local_lock(&s->cpu_sheaves->lock);
3986 	pcs = this_cpu_ptr(s->cpu_sheaves);
3987 
3988 	rcu_free = pcs->rcu_free;
3989 	pcs->rcu_free = NULL;
3990 
3991 	local_unlock(&s->cpu_sheaves->lock);
3992 
3993 	if (rcu_free)
3994 		call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
3995 }
3996 
3997 
3998 /* needed for kvfree_rcu_barrier() */
3999 void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
4000 {
4001 	struct slub_flush_work *sfw;
4002 	unsigned int cpu;
4003 
4004 	mutex_lock(&flush_lock);
4005 
4006 	for_each_online_cpu(cpu) {
4007 		sfw = &per_cpu(slub_flush, cpu);
4008 
4009 		/*
4010 		 * we don't check if rcu_free sheaf exists - racing
4011 		 * __kfree_rcu_sheaf() might have just removed it.
4012 		 * by executing flush_rcu_sheaf() on the cpu we make
4013 		 * sure the __kfree_rcu_sheaf() finished its call_rcu()
4014 		 */
4015 
4016 		INIT_WORK(&sfw->work, flush_rcu_sheaf);
4017 		sfw->s = s;
4018 		queue_work_on(cpu, flushwq, &sfw->work);
4019 	}
4020 
4021 	for_each_online_cpu(cpu) {
4022 		sfw = &per_cpu(slub_flush, cpu);
4023 		flush_work(&sfw->work);
4024 	}
4025 
4026 	mutex_unlock(&flush_lock);
4027 }
4028 
4029 void flush_all_rcu_sheaves(void)
4030 {
4031 	struct kmem_cache *s;
4032 
4033 	cpus_read_lock();
4034 	mutex_lock(&slab_mutex);
4035 
4036 	list_for_each_entry(s, &slab_caches, list) {
4037 		if (!cache_has_sheaves(s))
4038 			continue;
4039 		flush_rcu_sheaves_on_cache(s);
4040 	}
4041 
4042 	mutex_unlock(&slab_mutex);
4043 	cpus_read_unlock();
4044 
4045 	rcu_barrier();
4046 }
4047 
4048 /*
4049  * Use the cpu notifier to insure that the cpu slabs are flushed when
4050  * necessary.
4051  */
4052 static int slub_cpu_dead(unsigned int cpu)
4053 {
4054 	struct kmem_cache *s;
4055 
4056 	mutex_lock(&slab_mutex);
4057 	list_for_each_entry(s, &slab_caches, list) {
4058 		if (cache_has_sheaves(s))
4059 			__pcs_flush_all_cpu(s, cpu);
4060 	}
4061 	mutex_unlock(&slab_mutex);
4062 	return 0;
4063 }
4064 
4065 #ifdef CONFIG_SLUB_DEBUG
4066 static int count_free(struct slab *slab)
4067 {
4068 	return slab->objects - slab->inuse;
4069 }
4070 
4071 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
4072 {
4073 	return atomic_long_read(&n->total_objects);
4074 }
4075 
4076 /* Supports checking bulk free of a constructed freelist */
4077 static inline bool free_debug_processing(struct kmem_cache *s,
4078 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
4079 	unsigned long addr, depot_stack_handle_t handle)
4080 {
4081 	bool checks_ok = false;
4082 	void *object = head;
4083 	int cnt = 0;
4084 
4085 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
4086 		if (!check_slab(s, slab))
4087 			goto out;
4088 	}
4089 
4090 	if (slab->inuse < *bulk_cnt) {
4091 		slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
4092 			 slab->inuse, *bulk_cnt);
4093 		goto out;
4094 	}
4095 
4096 next_object:
4097 
4098 	if (++cnt > *bulk_cnt)
4099 		goto out_cnt;
4100 
4101 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
4102 		if (!free_consistency_checks(s, slab, object, addr))
4103 			goto out;
4104 	}
4105 
4106 	if (s->flags & SLAB_STORE_USER)
4107 		set_track_update(s, object, TRACK_FREE, addr, handle);
4108 	trace(s, slab, object, 0);
4109 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
4110 	init_object(s, object, SLUB_RED_INACTIVE);
4111 
4112 	/* Reached end of constructed freelist yet? */
4113 	if (object != tail) {
4114 		object = get_freepointer(s, object);
4115 		goto next_object;
4116 	}
4117 	checks_ok = true;
4118 
4119 out_cnt:
4120 	if (cnt != *bulk_cnt) {
4121 		slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
4122 			 *bulk_cnt, cnt);
4123 		*bulk_cnt = cnt;
4124 	}
4125 
4126 out:
4127 
4128 	if (!checks_ok)
4129 		slab_fix(s, "Object at 0x%p not freed", object);
4130 
4131 	return checks_ok;
4132 }
4133 #endif /* CONFIG_SLUB_DEBUG */
4134 
4135 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
4136 static unsigned long count_partial(struct kmem_cache_node *n,
4137 					int (*get_count)(struct slab *))
4138 {
4139 	unsigned long flags;
4140 	unsigned long x = 0;
4141 	struct slab *slab;
4142 
4143 	spin_lock_irqsave(&n->list_lock, flags);
4144 	list_for_each_entry(slab, &n->partial, slab_list)
4145 		x += get_count(slab);
4146 	spin_unlock_irqrestore(&n->list_lock, flags);
4147 	return x;
4148 }
4149 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
4150 
4151 #ifdef CONFIG_SLUB_DEBUG
4152 #define MAX_PARTIAL_TO_SCAN 10000
4153 
4154 static unsigned long count_partial_free_approx(struct kmem_cache_node *n)
4155 {
4156 	unsigned long flags;
4157 	unsigned long x = 0;
4158 	struct slab *slab;
4159 
4160 	spin_lock_irqsave(&n->list_lock, flags);
4161 	if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) {
4162 		list_for_each_entry(slab, &n->partial, slab_list)
4163 			x += slab->objects - slab->inuse;
4164 	} else {
4165 		/*
4166 		 * For a long list, approximate the total count of objects in
4167 		 * it to meet the limit on the number of slabs to scan.
4168 		 * Scan from both the list's head and tail for better accuracy.
4169 		 */
4170 		unsigned long scanned = 0;
4171 
4172 		list_for_each_entry(slab, &n->partial, slab_list) {
4173 			x += slab->objects - slab->inuse;
4174 			if (++scanned == MAX_PARTIAL_TO_SCAN / 2)
4175 				break;
4176 		}
4177 		list_for_each_entry_reverse(slab, &n->partial, slab_list) {
4178 			x += slab->objects - slab->inuse;
4179 			if (++scanned == MAX_PARTIAL_TO_SCAN)
4180 				break;
4181 		}
4182 		x = mult_frac(x, n->nr_partial, scanned);
4183 		x = min(x, node_nr_objs(n));
4184 	}
4185 	spin_unlock_irqrestore(&n->list_lock, flags);
4186 	return x;
4187 }
4188 
4189 static noinline void
4190 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
4191 {
4192 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
4193 				      DEFAULT_RATELIMIT_BURST);
4194 	int cpu = raw_smp_processor_id();
4195 	int node;
4196 	struct kmem_cache_node *n;
4197 
4198 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
4199 		return;
4200 
4201 	pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n",
4202 		cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags);
4203 	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
4204 		s->name, s->object_size, s->size, oo_order(s->oo),
4205 		oo_order(s->min));
4206 
4207 	if (oo_order(s->min) > get_order(s->object_size))
4208 		pr_warn("  %s debugging increased min order, use slab_debug=O to disable.\n",
4209 			s->name);
4210 
4211 	for_each_kmem_cache_node(s, node, n) {
4212 		unsigned long nr_slabs;
4213 		unsigned long nr_objs;
4214 		unsigned long nr_free;
4215 
4216 		nr_free  = count_partial_free_approx(n);
4217 		nr_slabs = node_nr_slabs(n);
4218 		nr_objs  = node_nr_objs(n);
4219 
4220 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
4221 			node, nr_slabs, nr_objs, nr_free);
4222 	}
4223 }
4224 #else /* CONFIG_SLUB_DEBUG */
4225 static inline void
4226 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
4227 #endif
4228 
4229 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
4230 {
4231 	if (unlikely(slab_test_pfmemalloc(slab)))
4232 		return gfp_pfmemalloc_allowed(gfpflags);
4233 
4234 	return true;
4235 }
4236 
4237 /*
4238  * Get the slab's freelist and do not freeze it.
4239  *
4240  * Assumes the slab is isolated from node partial list and not frozen.
4241  *
4242  * Assumes this is performed only for caches without debugging so we
4243  * don't need to worry about adding the slab to the full list.
4244  */
4245 static inline void *get_freelist_nofreeze(struct kmem_cache *s, struct slab *slab)
4246 {
4247 	struct freelist_counters old, new;
4248 
4249 	do {
4250 		old.freelist = slab->freelist;
4251 		old.counters = slab->counters;
4252 
4253 		new.freelist = NULL;
4254 		new.counters = old.counters;
4255 		VM_WARN_ON_ONCE(new.frozen);
4256 
4257 		new.inuse = old.objects;
4258 
4259 	} while (!slab_update_freelist(s, slab, &old, &new, "get_freelist_nofreeze"));
4260 
4261 	return old.freelist;
4262 }
4263 
4264 /*
4265  * If the object has been wiped upon free, make sure it's fully initialized by
4266  * zeroing out freelist pointer.
4267  *
4268  * Note that we also wipe custom freelist pointers.
4269  */
4270 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
4271 						   void *obj)
4272 {
4273 	if (unlikely(slab_want_init_on_free(s)) && obj &&
4274 	    !freeptr_outside_object(s))
4275 		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
4276 			0, sizeof(void *));
4277 }
4278 
4279 static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
4280 		void **p, unsigned int count, bool allow_spin)
4281 {
4282 	unsigned int allocated = 0;
4283 	struct kmem_cache_node *n;
4284 	bool needs_add_partial;
4285 	unsigned long flags;
4286 	void *object;
4287 
4288 	/*
4289 	 * Are we going to put the slab on the partial list?
4290 	 * Note slab->inuse is 0 on a new slab.
4291 	 */
4292 	needs_add_partial = (slab->objects > count);
4293 
4294 	if (!allow_spin && needs_add_partial) {
4295 
4296 		n = get_node(s, slab_nid(slab));
4297 
4298 		if (!spin_trylock_irqsave(&n->list_lock, flags)) {
4299 			/* Unlucky, discard newly allocated slab */
4300 			free_new_slab_nolock(s, slab);
4301 			return 0;
4302 		}
4303 	}
4304 
4305 	object = slab->freelist;
4306 	while (object && allocated < count) {
4307 		p[allocated] = object;
4308 		object = get_freepointer(s, object);
4309 		maybe_wipe_obj_freeptr(s, p[allocated]);
4310 
4311 		slab->inuse++;
4312 		allocated++;
4313 	}
4314 	slab->freelist = object;
4315 
4316 	if (needs_add_partial) {
4317 
4318 		if (allow_spin) {
4319 			n = get_node(s, slab_nid(slab));
4320 			spin_lock_irqsave(&n->list_lock, flags);
4321 		}
4322 		add_partial(n, slab, ADD_TO_HEAD);
4323 		spin_unlock_irqrestore(&n->list_lock, flags);
4324 	}
4325 
4326 	inc_slabs_node(s, slab_nid(slab), slab->objects);
4327 	return allocated;
4328 }
4329 
4330 /*
4331  * Slow path. We failed to allocate via percpu sheaves or they are not available
4332  * due to bootstrap or debugging enabled or SLUB_TINY.
4333  *
4334  * We try to allocate from partial slab lists and fall back to allocating a new
4335  * slab.
4336  */
4337 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
4338 			   unsigned long addr, unsigned int orig_size)
4339 {
4340 	bool allow_spin = gfpflags_allow_spinning(gfpflags);
4341 	void *object;
4342 	struct slab *slab;
4343 	struct partial_context pc;
4344 	bool try_thisnode = true;
4345 
4346 	stat(s, ALLOC_SLOWPATH);
4347 
4348 new_objects:
4349 
4350 	pc.flags = gfpflags;
4351 	/*
4352 	 * When a preferred node is indicated but no __GFP_THISNODE
4353 	 *
4354 	 * 1) try to get a partial slab from target node only by having
4355 	 *    __GFP_THISNODE in pc.flags for get_from_partial()
4356 	 * 2) if 1) failed, try to allocate a new slab from target node with
4357 	 *    GPF_NOWAIT | __GFP_THISNODE opportunistically
4358 	 * 3) if 2) failed, retry with original gfpflags which will allow
4359 	 *    get_from_partial() try partial lists of other nodes before
4360 	 *    potentially allocating new page from other nodes
4361 	 */
4362 	if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
4363 		     && try_thisnode)) {
4364 		if (unlikely(!allow_spin))
4365 			/* Do not upgrade gfp to NOWAIT from more restrictive mode */
4366 			pc.flags = gfpflags | __GFP_THISNODE;
4367 		else
4368 			pc.flags = GFP_NOWAIT | __GFP_THISNODE;
4369 	}
4370 
4371 	pc.orig_size = orig_size;
4372 	object = get_from_partial(s, node, &pc);
4373 	if (object)
4374 		goto success;
4375 
4376 	slab = new_slab(s, pc.flags, node);
4377 
4378 	if (unlikely(!slab)) {
4379 		if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
4380 		    && try_thisnode) {
4381 			try_thisnode = false;
4382 			goto new_objects;
4383 		}
4384 		slab_out_of_memory(s, gfpflags, node);
4385 		return NULL;
4386 	}
4387 
4388 	stat(s, ALLOC_SLAB);
4389 
4390 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4391 		object = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
4392 
4393 		if (likely(object))
4394 			goto success;
4395 	} else {
4396 		alloc_from_new_slab(s, slab, &object, 1, allow_spin);
4397 
4398 		/* we don't need to check SLAB_STORE_USER here */
4399 		if (likely(object))
4400 			return object;
4401 	}
4402 
4403 	if (allow_spin)
4404 		goto new_objects;
4405 
4406 	/* This could cause an endless loop. Fail instead. */
4407 	return NULL;
4408 
4409 success:
4410 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
4411 		set_track(s, object, TRACK_ALLOC, addr, gfpflags);
4412 
4413 	return object;
4414 }
4415 
4416 static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
4417 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4418 {
4419 	void *object;
4420 
4421 #ifdef CONFIG_NUMA
4422 	if (static_branch_unlikely(&strict_numa) &&
4423 			node == NUMA_NO_NODE) {
4424 
4425 		struct mempolicy *mpol = current->mempolicy;
4426 
4427 		if (mpol) {
4428 			/*
4429 			 * Special BIND rule support. If the local node
4430 			 * is in permitted set then do not redirect
4431 			 * to a particular node.
4432 			 * Otherwise we apply the memory policy to get
4433 			 * the node we need to allocate on.
4434 			 */
4435 			if (mpol->mode != MPOL_BIND ||
4436 					!node_isset(numa_mem_id(), mpol->nodes))
4437 				node = mempolicy_slab_node();
4438 		}
4439 	}
4440 #endif
4441 
4442 	object = ___slab_alloc(s, gfpflags, node, addr, orig_size);
4443 
4444 	return object;
4445 }
4446 
4447 static __fastpath_inline
4448 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
4449 {
4450 	flags &= gfp_allowed_mask;
4451 
4452 	might_alloc(flags);
4453 
4454 	if (unlikely(should_failslab(s, flags)))
4455 		return NULL;
4456 
4457 	return s;
4458 }
4459 
4460 static __fastpath_inline
4461 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
4462 			  gfp_t flags, size_t size, void **p, bool init,
4463 			  unsigned int orig_size)
4464 {
4465 	unsigned int zero_size = s->object_size;
4466 	bool kasan_init = init;
4467 	size_t i;
4468 	gfp_t init_flags = flags & gfp_allowed_mask;
4469 
4470 	/*
4471 	 * For kmalloc object, the allocated memory size(object_size) is likely
4472 	 * larger than the requested size(orig_size). If redzone check is
4473 	 * enabled for the extra space, don't zero it, as it will be redzoned
4474 	 * soon. The redzone operation for this extra space could be seen as a
4475 	 * replacement of current poisoning under certain debug option, and
4476 	 * won't break other sanity checks.
4477 	 */
4478 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
4479 	    (s->flags & SLAB_KMALLOC))
4480 		zero_size = orig_size;
4481 
4482 	/*
4483 	 * When slab_debug is enabled, avoid memory initialization integrated
4484 	 * into KASAN and instead zero out the memory via the memset below with
4485 	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
4486 	 * cause false-positive reports. This does not lead to a performance
4487 	 * penalty on production builds, as slab_debug is not intended to be
4488 	 * enabled there.
4489 	 */
4490 	if (__slub_debug_enabled())
4491 		kasan_init = false;
4492 
4493 	/*
4494 	 * As memory initialization might be integrated into KASAN,
4495 	 * kasan_slab_alloc and initialization memset must be
4496 	 * kept together to avoid discrepancies in behavior.
4497 	 *
4498 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
4499 	 */
4500 	for (i = 0; i < size; i++) {
4501 		p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
4502 		if (p[i] && init && (!kasan_init ||
4503 				     !kasan_has_integrated_init()))
4504 			memset(p[i], 0, zero_size);
4505 		if (gfpflags_allow_spinning(flags))
4506 			kmemleak_alloc_recursive(p[i], s->object_size, 1,
4507 						 s->flags, init_flags);
4508 		kmsan_slab_alloc(s, p[i], init_flags);
4509 		alloc_tagging_slab_alloc_hook(s, p[i], flags);
4510 	}
4511 
4512 	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
4513 }
4514 
4515 /*
4516  * Replace the empty main sheaf with a (at least partially) full sheaf.
4517  *
4518  * Must be called with the cpu_sheaves local lock locked. If successful, returns
4519  * the pcs pointer and the local lock locked (possibly on a different cpu than
4520  * initially called). If not successful, returns NULL and the local lock
4521  * unlocked.
4522  */
4523 static struct slub_percpu_sheaves *
4524 __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, gfp_t gfp)
4525 {
4526 	struct slab_sheaf *empty = NULL;
4527 	struct slab_sheaf *full;
4528 	struct node_barn *barn;
4529 	bool can_alloc;
4530 
4531 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
4532 
4533 	/* Bootstrap or debug cache, back off */
4534 	if (unlikely(!cache_has_sheaves(s))) {
4535 		local_unlock(&s->cpu_sheaves->lock);
4536 		return NULL;
4537 	}
4538 
4539 	if (pcs->spare && pcs->spare->size > 0) {
4540 		swap(pcs->main, pcs->spare);
4541 		return pcs;
4542 	}
4543 
4544 	barn = get_barn(s);
4545 	if (!barn) {
4546 		local_unlock(&s->cpu_sheaves->lock);
4547 		return NULL;
4548 	}
4549 
4550 	full = barn_replace_empty_sheaf(barn, pcs->main,
4551 					gfpflags_allow_spinning(gfp));
4552 
4553 	if (full) {
4554 		stat(s, BARN_GET);
4555 		pcs->main = full;
4556 		return pcs;
4557 	}
4558 
4559 	stat(s, BARN_GET_FAIL);
4560 
4561 	can_alloc = gfpflags_allow_blocking(gfp);
4562 
4563 	if (can_alloc) {
4564 		if (pcs->spare) {
4565 			empty = pcs->spare;
4566 			pcs->spare = NULL;
4567 		} else {
4568 			empty = barn_get_empty_sheaf(barn, true);
4569 		}
4570 	}
4571 
4572 	local_unlock(&s->cpu_sheaves->lock);
4573 
4574 	if (!can_alloc)
4575 		return NULL;
4576 
4577 	if (empty) {
4578 		if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
4579 			full = empty;
4580 		} else {
4581 			/*
4582 			 * we must be very low on memory so don't bother
4583 			 * with the barn
4584 			 */
4585 			free_empty_sheaf(s, empty);
4586 		}
4587 	} else {
4588 		full = alloc_full_sheaf(s, gfp);
4589 	}
4590 
4591 	if (!full)
4592 		return NULL;
4593 
4594 	/*
4595 	 * we can reach here only when gfpflags_allow_blocking
4596 	 * so this must not be an irq
4597 	 */
4598 	local_lock(&s->cpu_sheaves->lock);
4599 	pcs = this_cpu_ptr(s->cpu_sheaves);
4600 
4601 	/*
4602 	 * If we are returning empty sheaf, we either got it from the
4603 	 * barn or had to allocate one. If we are returning a full
4604 	 * sheaf, it's due to racing or being migrated to a different
4605 	 * cpu. Breaching the barn's sheaf limits should be thus rare
4606 	 * enough so just ignore them to simplify the recovery.
4607 	 */
4608 
4609 	if (pcs->main->size == 0) {
4610 		if (!pcs->spare)
4611 			pcs->spare = pcs->main;
4612 		else
4613 			barn_put_empty_sheaf(barn, pcs->main);
4614 		pcs->main = full;
4615 		return pcs;
4616 	}
4617 
4618 	if (!pcs->spare) {
4619 		pcs->spare = full;
4620 		return pcs;
4621 	}
4622 
4623 	if (pcs->spare->size == 0) {
4624 		barn_put_empty_sheaf(barn, pcs->spare);
4625 		pcs->spare = full;
4626 		return pcs;
4627 	}
4628 
4629 	barn_put_full_sheaf(barn, full);
4630 	stat(s, BARN_PUT);
4631 
4632 	return pcs;
4633 }
4634 
4635 static __fastpath_inline
4636 void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node)
4637 {
4638 	struct slub_percpu_sheaves *pcs;
4639 	bool node_requested;
4640 	void *object;
4641 
4642 #ifdef CONFIG_NUMA
4643 	if (static_branch_unlikely(&strict_numa) &&
4644 			 node == NUMA_NO_NODE) {
4645 
4646 		struct mempolicy *mpol = current->mempolicy;
4647 
4648 		if (mpol) {
4649 			/*
4650 			 * Special BIND rule support. If the local node
4651 			 * is in permitted set then do not redirect
4652 			 * to a particular node.
4653 			 * Otherwise we apply the memory policy to get
4654 			 * the node we need to allocate on.
4655 			 */
4656 			if (mpol->mode != MPOL_BIND ||
4657 					!node_isset(numa_mem_id(), mpol->nodes))
4658 
4659 				node = mempolicy_slab_node();
4660 		}
4661 	}
4662 #endif
4663 
4664 	node_requested = IS_ENABLED(CONFIG_NUMA) && node != NUMA_NO_NODE;
4665 
4666 	/*
4667 	 * We assume the percpu sheaves contain only local objects although it's
4668 	 * not completely guaranteed, so we verify later.
4669 	 */
4670 	if (unlikely(node_requested && node != numa_mem_id())) {
4671 		stat(s, ALLOC_NODE_MISMATCH);
4672 		return NULL;
4673 	}
4674 
4675 	if (!local_trylock(&s->cpu_sheaves->lock))
4676 		return NULL;
4677 
4678 	pcs = this_cpu_ptr(s->cpu_sheaves);
4679 
4680 	if (unlikely(pcs->main->size == 0)) {
4681 		pcs = __pcs_replace_empty_main(s, pcs, gfp);
4682 		if (unlikely(!pcs))
4683 			return NULL;
4684 	}
4685 
4686 	object = pcs->main->objects[pcs->main->size - 1];
4687 
4688 	if (unlikely(node_requested)) {
4689 		/*
4690 		 * Verify that the object was from the node we want. This could
4691 		 * be false because of cpu migration during an unlocked part of
4692 		 * the current allocation or previous freeing process.
4693 		 */
4694 		if (page_to_nid(virt_to_page(object)) != node) {
4695 			local_unlock(&s->cpu_sheaves->lock);
4696 			stat(s, ALLOC_NODE_MISMATCH);
4697 			return NULL;
4698 		}
4699 	}
4700 
4701 	pcs->main->size--;
4702 
4703 	local_unlock(&s->cpu_sheaves->lock);
4704 
4705 	stat(s, ALLOC_FASTPATH);
4706 
4707 	return object;
4708 }
4709 
4710 static __fastpath_inline
4711 unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, gfp_t gfp, size_t size,
4712 				 void **p)
4713 {
4714 	struct slub_percpu_sheaves *pcs;
4715 	struct slab_sheaf *main;
4716 	unsigned int allocated = 0;
4717 	unsigned int batch;
4718 
4719 next_batch:
4720 	if (!local_trylock(&s->cpu_sheaves->lock))
4721 		return allocated;
4722 
4723 	pcs = this_cpu_ptr(s->cpu_sheaves);
4724 
4725 	if (unlikely(pcs->main->size == 0)) {
4726 
4727 		struct slab_sheaf *full;
4728 		struct node_barn *barn;
4729 
4730 		if (unlikely(!cache_has_sheaves(s))) {
4731 			local_unlock(&s->cpu_sheaves->lock);
4732 			return allocated;
4733 		}
4734 
4735 		if (pcs->spare && pcs->spare->size > 0) {
4736 			swap(pcs->main, pcs->spare);
4737 			goto do_alloc;
4738 		}
4739 
4740 		barn = get_barn(s);
4741 		if (!barn) {
4742 			local_unlock(&s->cpu_sheaves->lock);
4743 			return allocated;
4744 		}
4745 
4746 		full = barn_replace_empty_sheaf(barn, pcs->main,
4747 						gfpflags_allow_spinning(gfp));
4748 
4749 		if (full) {
4750 			stat(s, BARN_GET);
4751 			pcs->main = full;
4752 			goto do_alloc;
4753 		}
4754 
4755 		stat(s, BARN_GET_FAIL);
4756 
4757 		local_unlock(&s->cpu_sheaves->lock);
4758 
4759 		/*
4760 		 * Once full sheaves in barn are depleted, let the bulk
4761 		 * allocation continue from slab pages, otherwise we would just
4762 		 * be copying arrays of pointers twice.
4763 		 */
4764 		return allocated;
4765 	}
4766 
4767 do_alloc:
4768 
4769 	main = pcs->main;
4770 	batch = min(size, main->size);
4771 
4772 	main->size -= batch;
4773 	memcpy(p, main->objects + main->size, batch * sizeof(void *));
4774 
4775 	local_unlock(&s->cpu_sheaves->lock);
4776 
4777 	stat_add(s, ALLOC_FASTPATH, batch);
4778 
4779 	allocated += batch;
4780 
4781 	if (batch < size) {
4782 		p += batch;
4783 		size -= batch;
4784 		goto next_batch;
4785 	}
4786 
4787 	return allocated;
4788 }
4789 
4790 
4791 /*
4792  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
4793  * have the fastpath folded into their functions. So no function call
4794  * overhead for requests that can be satisfied on the fastpath.
4795  *
4796  * The fastpath works by first checking if the lockless freelist can be used.
4797  * If not then __slab_alloc is called for slow processing.
4798  *
4799  * Otherwise we can simply pick the next object from the lockless free list.
4800  */
4801 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
4802 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4803 {
4804 	void *object;
4805 	bool init = false;
4806 
4807 	s = slab_pre_alloc_hook(s, gfpflags);
4808 	if (unlikely(!s))
4809 		return NULL;
4810 
4811 	object = kfence_alloc(s, orig_size, gfpflags);
4812 	if (unlikely(object))
4813 		goto out;
4814 
4815 	object = alloc_from_pcs(s, gfpflags, node);
4816 
4817 	if (!object)
4818 		object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
4819 
4820 	maybe_wipe_obj_freeptr(s, object);
4821 	init = slab_want_init_on_alloc(gfpflags, s);
4822 
4823 out:
4824 	/*
4825 	 * When init equals 'true', like for kzalloc() family, only
4826 	 * @orig_size bytes might be zeroed instead of s->object_size
4827 	 * In case this fails due to memcg_slab_post_alloc_hook(),
4828 	 * object is set to NULL
4829 	 */
4830 	slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size);
4831 
4832 	return object;
4833 }
4834 
4835 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
4836 {
4837 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
4838 				    s->object_size);
4839 
4840 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4841 
4842 	return ret;
4843 }
4844 EXPORT_SYMBOL(kmem_cache_alloc_noprof);
4845 
4846 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
4847 			   gfp_t gfpflags)
4848 {
4849 	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
4850 				    s->object_size);
4851 
4852 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4853 
4854 	return ret;
4855 }
4856 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
4857 
4858 bool kmem_cache_charge(void *objp, gfp_t gfpflags)
4859 {
4860 	if (!memcg_kmem_online())
4861 		return true;
4862 
4863 	return memcg_slab_post_charge(objp, gfpflags);
4864 }
4865 EXPORT_SYMBOL(kmem_cache_charge);
4866 
4867 /**
4868  * kmem_cache_alloc_node - Allocate an object on the specified node
4869  * @s: The cache to allocate from.
4870  * @gfpflags: See kmalloc().
4871  * @node: node number of the target node.
4872  *
4873  * Identical to kmem_cache_alloc but it will allocate memory on the given
4874  * node, which can improve the performance for cpu bound structures.
4875  *
4876  * Fallback to other node is possible if __GFP_THISNODE is not set.
4877  *
4878  * Return: pointer to the new object or %NULL in case of error
4879  */
4880 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
4881 {
4882 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
4883 
4884 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
4885 
4886 	return ret;
4887 }
4888 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
4889 
4890 static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
4891 				      struct slab_sheaf *sheaf, gfp_t gfp)
4892 {
4893 	int ret = 0;
4894 
4895 	ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
4896 
4897 	if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
4898 		return ret;
4899 
4900 	/*
4901 	 * if we are allowed to, refill sheaf with pfmemalloc but then remember
4902 	 * it for when it's returned
4903 	 */
4904 	ret = refill_sheaf(s, sheaf, gfp);
4905 	sheaf->pfmemalloc = true;
4906 
4907 	return ret;
4908 }
4909 
4910 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
4911 				   size_t size, void **p);
4912 
4913 /*
4914  * returns a sheaf that has at least the requested size
4915  * when prefilling is needed, do so with given gfp flags
4916  *
4917  * return NULL if sheaf allocation or prefilling failed
4918  */
4919 struct slab_sheaf *
4920 kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
4921 {
4922 	struct slub_percpu_sheaves *pcs;
4923 	struct slab_sheaf *sheaf = NULL;
4924 	struct node_barn *barn;
4925 
4926 	if (unlikely(!size))
4927 		return NULL;
4928 
4929 	if (unlikely(size > s->sheaf_capacity)) {
4930 
4931 		sheaf = kzalloc(struct_size(sheaf, objects, size), gfp);
4932 		if (!sheaf)
4933 			return NULL;
4934 
4935 		stat(s, SHEAF_PREFILL_OVERSIZE);
4936 		sheaf->cache = s;
4937 		sheaf->capacity = size;
4938 
4939 		/*
4940 		 * we do not need to care about pfmemalloc here because oversize
4941 		 * sheaves area always flushed and freed when returned
4942 		 */
4943 		if (!__kmem_cache_alloc_bulk(s, gfp, size,
4944 					     &sheaf->objects[0])) {
4945 			kfree(sheaf);
4946 			return NULL;
4947 		}
4948 
4949 		sheaf->size = size;
4950 
4951 		return sheaf;
4952 	}
4953 
4954 	local_lock(&s->cpu_sheaves->lock);
4955 	pcs = this_cpu_ptr(s->cpu_sheaves);
4956 
4957 	if (pcs->spare) {
4958 		sheaf = pcs->spare;
4959 		pcs->spare = NULL;
4960 		stat(s, SHEAF_PREFILL_FAST);
4961 	} else {
4962 		barn = get_barn(s);
4963 
4964 		stat(s, SHEAF_PREFILL_SLOW);
4965 		if (barn)
4966 			sheaf = barn_get_full_or_empty_sheaf(barn);
4967 		if (sheaf && sheaf->size)
4968 			stat(s, BARN_GET);
4969 		else
4970 			stat(s, BARN_GET_FAIL);
4971 	}
4972 
4973 	local_unlock(&s->cpu_sheaves->lock);
4974 
4975 
4976 	if (!sheaf)
4977 		sheaf = alloc_empty_sheaf(s, gfp);
4978 
4979 	if (sheaf) {
4980 		sheaf->capacity = s->sheaf_capacity;
4981 		sheaf->pfmemalloc = false;
4982 
4983 		if (sheaf->size < size &&
4984 		    __prefill_sheaf_pfmemalloc(s, sheaf, gfp)) {
4985 			sheaf_flush_unused(s, sheaf);
4986 			free_empty_sheaf(s, sheaf);
4987 			sheaf = NULL;
4988 		}
4989 	}
4990 
4991 	return sheaf;
4992 }
4993 
4994 /*
4995  * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
4996  *
4997  * If the sheaf cannot simply become the percpu spare sheaf, but there's space
4998  * for a full sheaf in the barn, we try to refill the sheaf back to the cache's
4999  * sheaf_capacity to avoid handling partially full sheaves.
5000  *
5001  * If the refill fails because gfp is e.g. GFP_NOWAIT, or the barn is full, the
5002  * sheaf is instead flushed and freed.
5003  */
5004 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
5005 			     struct slab_sheaf *sheaf)
5006 {
5007 	struct slub_percpu_sheaves *pcs;
5008 	struct node_barn *barn;
5009 
5010 	if (unlikely((sheaf->capacity != s->sheaf_capacity)
5011 		     || sheaf->pfmemalloc)) {
5012 		sheaf_flush_unused(s, sheaf);
5013 		kfree(sheaf);
5014 		return;
5015 	}
5016 
5017 	local_lock(&s->cpu_sheaves->lock);
5018 	pcs = this_cpu_ptr(s->cpu_sheaves);
5019 	barn = get_barn(s);
5020 
5021 	if (!pcs->spare) {
5022 		pcs->spare = sheaf;
5023 		sheaf = NULL;
5024 		stat(s, SHEAF_RETURN_FAST);
5025 	}
5026 
5027 	local_unlock(&s->cpu_sheaves->lock);
5028 
5029 	if (!sheaf)
5030 		return;
5031 
5032 	stat(s, SHEAF_RETURN_SLOW);
5033 
5034 	/*
5035 	 * If the barn has too many full sheaves or we fail to refill the sheaf,
5036 	 * simply flush and free it.
5037 	 */
5038 	if (!barn || data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
5039 	    refill_sheaf(s, sheaf, gfp)) {
5040 		sheaf_flush_unused(s, sheaf);
5041 		free_empty_sheaf(s, sheaf);
5042 		return;
5043 	}
5044 
5045 	barn_put_full_sheaf(barn, sheaf);
5046 	stat(s, BARN_PUT);
5047 }
5048 
5049 /*
5050  * refill a sheaf previously returned by kmem_cache_prefill_sheaf to at least
5051  * the given size
5052  *
5053  * the sheaf might be replaced by a new one when requesting more than
5054  * s->sheaf_capacity objects if such replacement is necessary, but the refill
5055  * fails (returning -ENOMEM), the existing sheaf is left intact
5056  *
5057  * In practice we always refill to full sheaf's capacity.
5058  */
5059 int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
5060 			    struct slab_sheaf **sheafp, unsigned int size)
5061 {
5062 	struct slab_sheaf *sheaf;
5063 
5064 	/*
5065 	 * TODO: do we want to support *sheaf == NULL to be equivalent of
5066 	 * kmem_cache_prefill_sheaf() ?
5067 	 */
5068 	if (!sheafp || !(*sheafp))
5069 		return -EINVAL;
5070 
5071 	sheaf = *sheafp;
5072 	if (sheaf->size >= size)
5073 		return 0;
5074 
5075 	if (likely(sheaf->capacity >= size)) {
5076 		if (likely(sheaf->capacity == s->sheaf_capacity))
5077 			return __prefill_sheaf_pfmemalloc(s, sheaf, gfp);
5078 
5079 		if (!__kmem_cache_alloc_bulk(s, gfp, sheaf->capacity - sheaf->size,
5080 					     &sheaf->objects[sheaf->size])) {
5081 			return -ENOMEM;
5082 		}
5083 		sheaf->size = sheaf->capacity;
5084 
5085 		return 0;
5086 	}
5087 
5088 	/*
5089 	 * We had a regular sized sheaf and need an oversize one, or we had an
5090 	 * oversize one already but need a larger one now.
5091 	 * This should be a very rare path so let's not complicate it.
5092 	 */
5093 	sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
5094 	if (!sheaf)
5095 		return -ENOMEM;
5096 
5097 	kmem_cache_return_sheaf(s, gfp, *sheafp);
5098 	*sheafp = sheaf;
5099 	return 0;
5100 }
5101 
5102 /*
5103  * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
5104  *
5105  * Guaranteed not to fail as many allocations as was the requested size.
5106  * After the sheaf is emptied, it fails - no fallback to the slab cache itself.
5107  *
5108  * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT
5109  * memcg charging is forced over limit if necessary, to avoid failure.
5110  *
5111  * It is possible that the allocation comes from kfence and then the sheaf
5112  * size is not decreased.
5113  */
5114 void *
5115 kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
5116 				   struct slab_sheaf *sheaf)
5117 {
5118 	void *ret = NULL;
5119 	bool init;
5120 
5121 	if (sheaf->size == 0)
5122 		goto out;
5123 
5124 	ret = kfence_alloc(s, s->object_size, gfp);
5125 
5126 	if (likely(!ret))
5127 		ret = sheaf->objects[--sheaf->size];
5128 
5129 	init = slab_want_init_on_alloc(gfp, s);
5130 
5131 	/* add __GFP_NOFAIL to force successful memcg charging */
5132 	slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);
5133 out:
5134 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
5135 
5136 	return ret;
5137 }
5138 
5139 unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
5140 {
5141 	return sheaf->size;
5142 }
5143 /*
5144  * To avoid unnecessary overhead, we pass through large allocation requests
5145  * directly to the page allocator. We use __GFP_COMP, because we will need to
5146  * know the allocation order to free the pages properly in kfree.
5147  */
5148 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
5149 {
5150 	struct page *page;
5151 	void *ptr = NULL;
5152 	unsigned int order = get_order(size);
5153 
5154 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
5155 		flags = kmalloc_fix_flags(flags);
5156 
5157 	flags |= __GFP_COMP;
5158 
5159 	if (node == NUMA_NO_NODE)
5160 		page = alloc_frozen_pages_noprof(flags, order);
5161 	else
5162 		page = __alloc_frozen_pages_noprof(flags, order, node, NULL);
5163 
5164 	if (page) {
5165 		ptr = page_address(page);
5166 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
5167 				      PAGE_SIZE << order);
5168 		__SetPageLargeKmalloc(page);
5169 	}
5170 
5171 	ptr = kasan_kmalloc_large(ptr, size, flags);
5172 	/* As ptr might get tagged, call kmemleak hook after KASAN. */
5173 	kmemleak_alloc(ptr, size, 1, flags);
5174 	kmsan_kmalloc_large(ptr, size, flags);
5175 
5176 	return ptr;
5177 }
5178 
5179 void *__kmalloc_large_noprof(size_t size, gfp_t flags)
5180 {
5181 	void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
5182 
5183 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
5184 		      flags, NUMA_NO_NODE);
5185 	return ret;
5186 }
5187 EXPORT_SYMBOL(__kmalloc_large_noprof);
5188 
5189 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
5190 {
5191 	void *ret = ___kmalloc_large_node(size, flags, node);
5192 
5193 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
5194 		      flags, node);
5195 	return ret;
5196 }
5197 EXPORT_SYMBOL(__kmalloc_large_node_noprof);
5198 
5199 static __always_inline
5200 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
5201 			unsigned long caller)
5202 {
5203 	struct kmem_cache *s;
5204 	void *ret;
5205 
5206 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
5207 		ret = __kmalloc_large_node_noprof(size, flags, node);
5208 		trace_kmalloc(caller, ret, size,
5209 			      PAGE_SIZE << get_order(size), flags, node);
5210 		return ret;
5211 	}
5212 
5213 	if (unlikely(!size))
5214 		return ZERO_SIZE_PTR;
5215 
5216 	s = kmalloc_slab(size, b, flags, caller);
5217 
5218 	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
5219 	ret = kasan_kmalloc(s, ret, size, flags);
5220 	trace_kmalloc(caller, ret, size, s->size, flags, node);
5221 	return ret;
5222 }
5223 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
5224 {
5225 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
5226 }
5227 EXPORT_SYMBOL(__kmalloc_node_noprof);
5228 
5229 void *__kmalloc_noprof(size_t size, gfp_t flags)
5230 {
5231 	return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
5232 }
5233 EXPORT_SYMBOL(__kmalloc_noprof);
5234 
5235 /**
5236  * kmalloc_nolock - Allocate an object of given size from any context.
5237  * @size: size to allocate
5238  * @gfp_flags: GFP flags. Only __GFP_ACCOUNT, __GFP_ZERO, __GFP_NO_OBJ_EXT
5239  * allowed.
5240  * @node: node number of the target node.
5241  *
5242  * Return: pointer to the new object or NULL in case of error.
5243  * NULL does not mean EBUSY or EAGAIN. It means ENOMEM.
5244  * There is no reason to call it again and expect !NULL.
5245  */
5246 void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
5247 {
5248 	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_NOMEMALLOC | gfp_flags;
5249 	struct kmem_cache *s;
5250 	bool can_retry = true;
5251 	void *ret;
5252 
5253 	VM_WARN_ON_ONCE(gfp_flags & ~(__GFP_ACCOUNT | __GFP_ZERO |
5254 				      __GFP_NO_OBJ_EXT));
5255 
5256 	if (unlikely(!size))
5257 		return ZERO_SIZE_PTR;
5258 
5259 	/*
5260 	 * See the comment for the same check in
5261 	 * alloc_frozen_pages_nolock_noprof()
5262 	 */
5263 	if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
5264 		return NULL;
5265 
5266 retry:
5267 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
5268 		return NULL;
5269 	s = kmalloc_slab(size, NULL, alloc_gfp, _RET_IP_);
5270 
5271 	if (!(s->flags & __CMPXCHG_DOUBLE) && !kmem_cache_debug(s))
5272 		/*
5273 		 * kmalloc_nolock() is not supported on architectures that
5274 		 * don't implement cmpxchg16b and thus need slab_lock()
5275 		 * which could be preempted by a nmi.
5276 		 * But debug caches don't use that and only rely on
5277 		 * kmem_cache_node->list_lock, so kmalloc_nolock() can attempt
5278 		 * to allocate from debug caches by
5279 		 * spin_trylock_irqsave(&n->list_lock, ...)
5280 		 */
5281 		return NULL;
5282 
5283 	ret = alloc_from_pcs(s, alloc_gfp, node);
5284 	if (ret)
5285 		goto success;
5286 
5287 	/*
5288 	 * Do not call slab_alloc_node(), since trylock mode isn't
5289 	 * compatible with slab_pre_alloc_hook/should_failslab and
5290 	 * kfence_alloc. Hence call __slab_alloc_node() (at most twice)
5291 	 * and slab_post_alloc_hook() directly.
5292 	 */
5293 	ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size);
5294 
5295 	/*
5296 	 * It's possible we failed due to trylock as we preempted someone with
5297 	 * the sheaves locked, and the list_lock is also held by another cpu.
5298 	 * But it should be rare that multiple kmalloc buckets would have
5299 	 * sheaves locked, so try a larger one.
5300 	 */
5301 	if (!ret && can_retry) {
5302 		/* pick the next kmalloc bucket */
5303 		size = s->object_size + 1;
5304 		/*
5305 		 * Another alternative is to
5306 		 * if (memcg) alloc_gfp &= ~__GFP_ACCOUNT;
5307 		 * else if (!memcg) alloc_gfp |= __GFP_ACCOUNT;
5308 		 * to retry from bucket of the same size.
5309 		 */
5310 		can_retry = false;
5311 		goto retry;
5312 	}
5313 
5314 success:
5315 	maybe_wipe_obj_freeptr(s, ret);
5316 	slab_post_alloc_hook(s, NULL, alloc_gfp, 1, &ret,
5317 			     slab_want_init_on_alloc(alloc_gfp, s), size);
5318 
5319 	ret = kasan_kmalloc(s, ret, size, alloc_gfp);
5320 	return ret;
5321 }
5322 EXPORT_SYMBOL_GPL(kmalloc_nolock_noprof);
5323 
5324 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
5325 					 int node, unsigned long caller)
5326 {
5327 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
5328 
5329 }
5330 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
5331 
5332 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
5333 {
5334 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
5335 					    _RET_IP_, size);
5336 
5337 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
5338 
5339 	ret = kasan_kmalloc(s, ret, size, gfpflags);
5340 	return ret;
5341 }
5342 EXPORT_SYMBOL(__kmalloc_cache_noprof);
5343 
5344 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
5345 				  int node, size_t size)
5346 {
5347 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
5348 
5349 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
5350 
5351 	ret = kasan_kmalloc(s, ret, size, gfpflags);
5352 	return ret;
5353 }
5354 EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
5355 
5356 static noinline void free_to_partial_list(
5357 	struct kmem_cache *s, struct slab *slab,
5358 	void *head, void *tail, int bulk_cnt,
5359 	unsigned long addr)
5360 {
5361 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
5362 	struct slab *slab_free = NULL;
5363 	int cnt = bulk_cnt;
5364 	unsigned long flags;
5365 	depot_stack_handle_t handle = 0;
5366 
5367 	/*
5368 	 * We cannot use GFP_NOWAIT as there are callsites where waking up
5369 	 * kswapd could deadlock
5370 	 */
5371 	if (s->flags & SLAB_STORE_USER)
5372 		handle = set_track_prepare(__GFP_NOWARN);
5373 
5374 	spin_lock_irqsave(&n->list_lock, flags);
5375 
5376 	if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
5377 		void *prior = slab->freelist;
5378 
5379 		/* Perform the actual freeing while we still hold the locks */
5380 		slab->inuse -= cnt;
5381 		set_freepointer(s, tail, prior);
5382 		slab->freelist = head;
5383 
5384 		/*
5385 		 * If the slab is empty, and node's partial list is full,
5386 		 * it should be discarded anyway no matter it's on full or
5387 		 * partial list.
5388 		 */
5389 		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
5390 			slab_free = slab;
5391 
5392 		if (!prior) {
5393 			/* was on full list */
5394 			remove_full(s, n, slab);
5395 			if (!slab_free) {
5396 				add_partial(n, slab, ADD_TO_TAIL);
5397 				stat(s, FREE_ADD_PARTIAL);
5398 			}
5399 		} else if (slab_free) {
5400 			remove_partial(n, slab);
5401 			stat(s, FREE_REMOVE_PARTIAL);
5402 		}
5403 	}
5404 
5405 	if (slab_free) {
5406 		/*
5407 		 * Update the counters while still holding n->list_lock to
5408 		 * prevent spurious validation warnings
5409 		 */
5410 		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
5411 	}
5412 
5413 	spin_unlock_irqrestore(&n->list_lock, flags);
5414 
5415 	if (slab_free) {
5416 		stat(s, FREE_SLAB);
5417 		free_slab(s, slab_free);
5418 	}
5419 }
5420 
5421 /*
5422  * Slow path handling. This may still be called frequently since objects
5423  * have a longer lifetime than the cpu slabs in most processing loads.
5424  *
5425  * So we still attempt to reduce cache line usage. Just take the slab
5426  * lock and free the item. If there is no additional partial slab
5427  * handling required then we can return immediately.
5428  */
5429 static void __slab_free(struct kmem_cache *s, struct slab *slab,
5430 			void *head, void *tail, int cnt,
5431 			unsigned long addr)
5432 
5433 {
5434 	bool was_full;
5435 	struct freelist_counters old, new;
5436 	struct kmem_cache_node *n = NULL;
5437 	unsigned long flags;
5438 	bool on_node_partial;
5439 
5440 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
5441 		free_to_partial_list(s, slab, head, tail, cnt, addr);
5442 		return;
5443 	}
5444 
5445 	do {
5446 		if (unlikely(n)) {
5447 			spin_unlock_irqrestore(&n->list_lock, flags);
5448 			n = NULL;
5449 		}
5450 
5451 		old.freelist = slab->freelist;
5452 		old.counters = slab->counters;
5453 
5454 		was_full = (old.freelist == NULL);
5455 
5456 		set_freepointer(s, tail, old.freelist);
5457 
5458 		new.freelist = head;
5459 		new.counters = old.counters;
5460 		new.inuse -= cnt;
5461 
5462 		/*
5463 		 * Might need to be taken off (due to becoming empty) or added
5464 		 * to (due to not being full anymore) the partial list.
5465 		 * Unless it's frozen.
5466 		 */
5467 		if (!new.inuse || was_full) {
5468 
5469 			n = get_node(s, slab_nid(slab));
5470 			/*
5471 			 * Speculatively acquire the list_lock.
5472 			 * If the cmpxchg does not succeed then we may
5473 			 * drop the list_lock without any processing.
5474 			 *
5475 			 * Otherwise the list_lock will synchronize with
5476 			 * other processors updating the list of slabs.
5477 			 */
5478 			spin_lock_irqsave(&n->list_lock, flags);
5479 
5480 			on_node_partial = slab_test_node_partial(slab);
5481 		}
5482 
5483 	} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
5484 
5485 	if (likely(!n)) {
5486 		/*
5487 		 * We didn't take the list_lock because the slab was already on
5488 		 * the partial list and will remain there.
5489 		 */
5490 		return;
5491 	}
5492 
5493 	/*
5494 	 * This slab was partially empty but not on the per-node partial list,
5495 	 * in which case we shouldn't manipulate its list, just return.
5496 	 */
5497 	if (!was_full && !on_node_partial) {
5498 		spin_unlock_irqrestore(&n->list_lock, flags);
5499 		return;
5500 	}
5501 
5502 	/*
5503 	 * If slab became empty, should we add/keep it on the partial list or we
5504 	 * have enough?
5505 	 */
5506 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
5507 		goto slab_empty;
5508 
5509 	/*
5510 	 * Objects left in the slab. If it was not on the partial list before
5511 	 * then add it.
5512 	 */
5513 	if (unlikely(was_full)) {
5514 		add_partial(n, slab, ADD_TO_TAIL);
5515 		stat(s, FREE_ADD_PARTIAL);
5516 	}
5517 	spin_unlock_irqrestore(&n->list_lock, flags);
5518 	return;
5519 
5520 slab_empty:
5521 	/*
5522 	 * The slab could have a single object and thus go from full to empty in
5523 	 * a single free, but more likely it was on the partial list. Remove it.
5524 	 */
5525 	if (likely(!was_full)) {
5526 		remove_partial(n, slab);
5527 		stat(s, FREE_REMOVE_PARTIAL);
5528 	}
5529 
5530 	spin_unlock_irqrestore(&n->list_lock, flags);
5531 	stat(s, FREE_SLAB);
5532 	discard_slab(s, slab);
5533 }
5534 
5535 /*
5536  * pcs is locked. We should have get rid of the spare sheaf and obtained an
5537  * empty sheaf, while the main sheaf is full. We want to install the empty sheaf
5538  * as a main sheaf, and make the current main sheaf a spare sheaf.
5539  *
5540  * However due to having relinquished the cpu_sheaves lock when obtaining
5541  * the empty sheaf, we need to handle some unlikely but possible cases.
5542  *
5543  * If we put any sheaf to barn here, it's because we were interrupted or have
5544  * been migrated to a different cpu, which should be rare enough so just ignore
5545  * the barn's limits to simplify the handling.
5546  *
5547  * An alternative scenario that gets us here is when we fail
5548  * barn_replace_full_sheaf(), because there's no empty sheaf available in the
5549  * barn, so we had to allocate it by alloc_empty_sheaf(). But because we saw the
5550  * limit on full sheaves was not exceeded, we assume it didn't change and just
5551  * put the full sheaf there.
5552  */
5553 static void __pcs_install_empty_sheaf(struct kmem_cache *s,
5554 		struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty,
5555 		struct node_barn *barn)
5556 {
5557 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
5558 
5559 	/* This is what we expect to find if nobody interrupted us. */
5560 	if (likely(!pcs->spare)) {
5561 		pcs->spare = pcs->main;
5562 		pcs->main = empty;
5563 		return;
5564 	}
5565 
5566 	/*
5567 	 * Unlikely because if the main sheaf had space, we would have just
5568 	 * freed to it. Get rid of our empty sheaf.
5569 	 */
5570 	if (pcs->main->size < s->sheaf_capacity) {
5571 		barn_put_empty_sheaf(barn, empty);
5572 		return;
5573 	}
5574 
5575 	/* Also unlikely for the same reason */
5576 	if (pcs->spare->size < s->sheaf_capacity) {
5577 		swap(pcs->main, pcs->spare);
5578 		barn_put_empty_sheaf(barn, empty);
5579 		return;
5580 	}
5581 
5582 	/*
5583 	 * We probably failed barn_replace_full_sheaf() due to no empty sheaf
5584 	 * available there, but we allocated one, so finish the job.
5585 	 */
5586 	barn_put_full_sheaf(barn, pcs->main);
5587 	stat(s, BARN_PUT);
5588 	pcs->main = empty;
5589 }
5590 
5591 /*
5592  * Replace the full main sheaf with a (at least partially) empty sheaf.
5593  *
5594  * Must be called with the cpu_sheaves local lock locked. If successful, returns
5595  * the pcs pointer and the local lock locked (possibly on a different cpu than
5596  * initially called). If not successful, returns NULL and the local lock
5597  * unlocked.
5598  */
5599 static struct slub_percpu_sheaves *
5600 __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
5601 			bool allow_spin)
5602 {
5603 	struct slab_sheaf *empty;
5604 	struct node_barn *barn;
5605 	bool put_fail;
5606 
5607 restart:
5608 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
5609 
5610 	/* Bootstrap or debug cache, back off */
5611 	if (unlikely(!cache_has_sheaves(s))) {
5612 		local_unlock(&s->cpu_sheaves->lock);
5613 		return NULL;
5614 	}
5615 
5616 	barn = get_barn(s);
5617 	if (!barn) {
5618 		local_unlock(&s->cpu_sheaves->lock);
5619 		return NULL;
5620 	}
5621 
5622 	put_fail = false;
5623 
5624 	if (!pcs->spare) {
5625 		empty = barn_get_empty_sheaf(barn, allow_spin);
5626 		if (empty) {
5627 			pcs->spare = pcs->main;
5628 			pcs->main = empty;
5629 			return pcs;
5630 		}
5631 		goto alloc_empty;
5632 	}
5633 
5634 	if (pcs->spare->size < s->sheaf_capacity) {
5635 		swap(pcs->main, pcs->spare);
5636 		return pcs;
5637 	}
5638 
5639 	empty = barn_replace_full_sheaf(barn, pcs->main, allow_spin);
5640 
5641 	if (!IS_ERR(empty)) {
5642 		stat(s, BARN_PUT);
5643 		pcs->main = empty;
5644 		return pcs;
5645 	}
5646 
5647 	/* sheaf_flush_unused() doesn't support !allow_spin */
5648 	if (PTR_ERR(empty) == -E2BIG && allow_spin) {
5649 		/* Since we got here, spare exists and is full */
5650 		struct slab_sheaf *to_flush = pcs->spare;
5651 
5652 		stat(s, BARN_PUT_FAIL);
5653 
5654 		pcs->spare = NULL;
5655 		local_unlock(&s->cpu_sheaves->lock);
5656 
5657 		sheaf_flush_unused(s, to_flush);
5658 		empty = to_flush;
5659 		goto got_empty;
5660 	}
5661 
5662 	/*
5663 	 * We could not replace full sheaf because barn had no empty
5664 	 * sheaves. We can still allocate it and put the full sheaf in
5665 	 * __pcs_install_empty_sheaf(), but if we fail to allocate it,
5666 	 * make sure to count the fail.
5667 	 */
5668 	put_fail = true;
5669 
5670 alloc_empty:
5671 	local_unlock(&s->cpu_sheaves->lock);
5672 
5673 	/*
5674 	 * alloc_empty_sheaf() doesn't support !allow_spin and it's
5675 	 * easier to fall back to freeing directly without sheaves
5676 	 * than add the support (and to sheaf_flush_unused() above)
5677 	 */
5678 	if (!allow_spin)
5679 		return NULL;
5680 
5681 	empty = alloc_empty_sheaf(s, GFP_NOWAIT);
5682 	if (empty)
5683 		goto got_empty;
5684 
5685 	if (put_fail)
5686 		 stat(s, BARN_PUT_FAIL);
5687 
5688 	if (!sheaf_flush_main(s))
5689 		return NULL;
5690 
5691 	if (!local_trylock(&s->cpu_sheaves->lock))
5692 		return NULL;
5693 
5694 	pcs = this_cpu_ptr(s->cpu_sheaves);
5695 
5696 	/*
5697 	 * we flushed the main sheaf so it should be empty now,
5698 	 * but in case we got preempted or migrated, we need to
5699 	 * check again
5700 	 */
5701 	if (pcs->main->size == s->sheaf_capacity)
5702 		goto restart;
5703 
5704 	return pcs;
5705 
5706 got_empty:
5707 	if (!local_trylock(&s->cpu_sheaves->lock)) {
5708 		barn_put_empty_sheaf(barn, empty);
5709 		return NULL;
5710 	}
5711 
5712 	pcs = this_cpu_ptr(s->cpu_sheaves);
5713 	__pcs_install_empty_sheaf(s, pcs, empty, barn);
5714 
5715 	return pcs;
5716 }
5717 
5718 /*
5719  * Free an object to the percpu sheaves.
5720  * The object is expected to have passed slab_free_hook() already.
5721  */
5722 static __fastpath_inline
5723 bool free_to_pcs(struct kmem_cache *s, void *object, bool allow_spin)
5724 {
5725 	struct slub_percpu_sheaves *pcs;
5726 
5727 	if (!local_trylock(&s->cpu_sheaves->lock))
5728 		return false;
5729 
5730 	pcs = this_cpu_ptr(s->cpu_sheaves);
5731 
5732 	if (unlikely(pcs->main->size == s->sheaf_capacity)) {
5733 
5734 		pcs = __pcs_replace_full_main(s, pcs, allow_spin);
5735 		if (unlikely(!pcs))
5736 			return false;
5737 	}
5738 
5739 	pcs->main->objects[pcs->main->size++] = object;
5740 
5741 	local_unlock(&s->cpu_sheaves->lock);
5742 
5743 	stat(s, FREE_FASTPATH);
5744 
5745 	return true;
5746 }
5747 
5748 static void rcu_free_sheaf(struct rcu_head *head)
5749 {
5750 	struct kmem_cache_node *n;
5751 	struct slab_sheaf *sheaf;
5752 	struct node_barn *barn = NULL;
5753 	struct kmem_cache *s;
5754 
5755 	sheaf = container_of(head, struct slab_sheaf, rcu_head);
5756 
5757 	s = sheaf->cache;
5758 
5759 	/*
5760 	 * This may remove some objects due to slab_free_hook() returning false,
5761 	 * so that the sheaf might no longer be completely full. But it's easier
5762 	 * to handle it as full (unless it became completely empty), as the code
5763 	 * handles it fine. The only downside is that sheaf will serve fewer
5764 	 * allocations when reused. It only happens due to debugging, which is a
5765 	 * performance hit anyway.
5766 	 *
5767 	 * If it returns true, there was at least one object from pfmemalloc
5768 	 * slab so simply flush everything.
5769 	 */
5770 	if (__rcu_free_sheaf_prepare(s, sheaf))
5771 		goto flush;
5772 
5773 	n = get_node(s, sheaf->node);
5774 	if (!n)
5775 		goto flush;
5776 
5777 	barn = n->barn;
5778 
5779 	/* due to slab_free_hook() */
5780 	if (unlikely(sheaf->size == 0))
5781 		goto empty;
5782 
5783 	/*
5784 	 * Checking nr_full/nr_empty outside lock avoids contention in case the
5785 	 * barn is at the respective limit. Due to the race we might go over the
5786 	 * limit but that should be rare and harmless.
5787 	 */
5788 
5789 	if (data_race(barn->nr_full) < MAX_FULL_SHEAVES) {
5790 		stat(s, BARN_PUT);
5791 		barn_put_full_sheaf(barn, sheaf);
5792 		return;
5793 	}
5794 
5795 flush:
5796 	stat(s, BARN_PUT_FAIL);
5797 	sheaf_flush_unused(s, sheaf);
5798 
5799 empty:
5800 	if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
5801 		barn_put_empty_sheaf(barn, sheaf);
5802 		return;
5803 	}
5804 
5805 	free_empty_sheaf(s, sheaf);
5806 }
5807 
5808 /*
5809  * kvfree_call_rcu() can be called while holding a raw_spinlock_t. Since
5810  * __kfree_rcu_sheaf() may acquire a spinlock_t (sleeping lock on PREEMPT_RT),
5811  * this would violate lock nesting rules. Therefore, kvfree_call_rcu() avoids
5812  * this problem by bypassing the sheaves layer entirely on PREEMPT_RT.
5813  *
5814  * However, lockdep still complains that it is invalid to acquire spinlock_t
5815  * while holding raw_spinlock_t, even on !PREEMPT_RT where spinlock_t is a
5816  * spinning lock. Tell lockdep that acquiring spinlock_t is valid here
5817  * by temporarily raising the wait-type to LD_WAIT_CONFIG.
5818  */
5819 static DEFINE_WAIT_OVERRIDE_MAP(kfree_rcu_sheaf_map, LD_WAIT_CONFIG);
5820 
5821 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
5822 {
5823 	struct slub_percpu_sheaves *pcs;
5824 	struct slab_sheaf *rcu_sheaf;
5825 
5826 	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
5827 		return false;
5828 
5829 	lock_map_acquire_try(&kfree_rcu_sheaf_map);
5830 
5831 	if (!local_trylock(&s->cpu_sheaves->lock))
5832 		goto fail;
5833 
5834 	pcs = this_cpu_ptr(s->cpu_sheaves);
5835 
5836 	if (unlikely(!pcs->rcu_free)) {
5837 
5838 		struct slab_sheaf *empty;
5839 		struct node_barn *barn;
5840 
5841 		/* Bootstrap or debug cache, fall back */
5842 		if (unlikely(!cache_has_sheaves(s))) {
5843 			local_unlock(&s->cpu_sheaves->lock);
5844 			goto fail;
5845 		}
5846 
5847 		if (pcs->spare && pcs->spare->size == 0) {
5848 			pcs->rcu_free = pcs->spare;
5849 			pcs->spare = NULL;
5850 			goto do_free;
5851 		}
5852 
5853 		barn = get_barn(s);
5854 		if (!barn) {
5855 			local_unlock(&s->cpu_sheaves->lock);
5856 			goto fail;
5857 		}
5858 
5859 		empty = barn_get_empty_sheaf(barn, true);
5860 
5861 		if (empty) {
5862 			pcs->rcu_free = empty;
5863 			goto do_free;
5864 		}
5865 
5866 		local_unlock(&s->cpu_sheaves->lock);
5867 
5868 		empty = alloc_empty_sheaf(s, GFP_NOWAIT);
5869 
5870 		if (!empty)
5871 			goto fail;
5872 
5873 		if (!local_trylock(&s->cpu_sheaves->lock)) {
5874 			barn_put_empty_sheaf(barn, empty);
5875 			goto fail;
5876 		}
5877 
5878 		pcs = this_cpu_ptr(s->cpu_sheaves);
5879 
5880 		if (unlikely(pcs->rcu_free))
5881 			barn_put_empty_sheaf(barn, empty);
5882 		else
5883 			pcs->rcu_free = empty;
5884 	}
5885 
5886 do_free:
5887 
5888 	rcu_sheaf = pcs->rcu_free;
5889 
5890 	/*
5891 	 * Since we flush immediately when size reaches capacity, we never reach
5892 	 * this with size already at capacity, so no OOB write is possible.
5893 	 */
5894 	rcu_sheaf->objects[rcu_sheaf->size++] = obj;
5895 
5896 	if (likely(rcu_sheaf->size < s->sheaf_capacity)) {
5897 		rcu_sheaf = NULL;
5898 	} else {
5899 		pcs->rcu_free = NULL;
5900 		rcu_sheaf->node = numa_mem_id();
5901 	}
5902 
5903 	/*
5904 	 * we flush before local_unlock to make sure a racing
5905 	 * flush_all_rcu_sheaves() doesn't miss this sheaf
5906 	 */
5907 	if (rcu_sheaf)
5908 		call_rcu(&rcu_sheaf->rcu_head, rcu_free_sheaf);
5909 
5910 	local_unlock(&s->cpu_sheaves->lock);
5911 
5912 	stat(s, FREE_RCU_SHEAF);
5913 	lock_map_release(&kfree_rcu_sheaf_map);
5914 	return true;
5915 
5916 fail:
5917 	stat(s, FREE_RCU_SHEAF_FAIL);
5918 	lock_map_release(&kfree_rcu_sheaf_map);
5919 	return false;
5920 }
5921 
5922 /*
5923  * Bulk free objects to the percpu sheaves.
5924  * Unlike free_to_pcs() this includes the calls to all necessary hooks
5925  * and the fallback to freeing to slab pages.
5926  */
5927 static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
5928 {
5929 	struct slub_percpu_sheaves *pcs;
5930 	struct slab_sheaf *main, *empty;
5931 	bool init = slab_want_init_on_free(s);
5932 	unsigned int batch, i = 0;
5933 	struct node_barn *barn;
5934 	void *remote_objects[PCS_BATCH_MAX];
5935 	unsigned int remote_nr = 0;
5936 	int node = numa_mem_id();
5937 
5938 next_remote_batch:
5939 	while (i < size) {
5940 		struct slab *slab = virt_to_slab(p[i]);
5941 
5942 		memcg_slab_free_hook(s, slab, p + i, 1);
5943 		alloc_tagging_slab_free_hook(s, slab, p + i, 1);
5944 
5945 		if (unlikely(!slab_free_hook(s, p[i], init, false))) {
5946 			p[i] = p[--size];
5947 			continue;
5948 		}
5949 
5950 		if (unlikely((IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)
5951 			     || slab_test_pfmemalloc(slab))) {
5952 			remote_objects[remote_nr] = p[i];
5953 			p[i] = p[--size];
5954 			if (++remote_nr >= PCS_BATCH_MAX)
5955 				goto flush_remote;
5956 			continue;
5957 		}
5958 
5959 		i++;
5960 	}
5961 
5962 	if (!size)
5963 		goto flush_remote;
5964 
5965 next_batch:
5966 	if (!local_trylock(&s->cpu_sheaves->lock))
5967 		goto fallback;
5968 
5969 	pcs = this_cpu_ptr(s->cpu_sheaves);
5970 
5971 	if (likely(pcs->main->size < s->sheaf_capacity))
5972 		goto do_free;
5973 
5974 	barn = get_barn(s);
5975 	if (!barn)
5976 		goto no_empty;
5977 
5978 	if (!pcs->spare) {
5979 		empty = barn_get_empty_sheaf(barn, true);
5980 		if (!empty)
5981 			goto no_empty;
5982 
5983 		pcs->spare = pcs->main;
5984 		pcs->main = empty;
5985 		goto do_free;
5986 	}
5987 
5988 	if (pcs->spare->size < s->sheaf_capacity) {
5989 		swap(pcs->main, pcs->spare);
5990 		goto do_free;
5991 	}
5992 
5993 	empty = barn_replace_full_sheaf(barn, pcs->main, true);
5994 	if (IS_ERR(empty)) {
5995 		stat(s, BARN_PUT_FAIL);
5996 		goto no_empty;
5997 	}
5998 
5999 	stat(s, BARN_PUT);
6000 	pcs->main = empty;
6001 
6002 do_free:
6003 	main = pcs->main;
6004 	batch = min(size, s->sheaf_capacity - main->size);
6005 
6006 	memcpy(main->objects + main->size, p, batch * sizeof(void *));
6007 	main->size += batch;
6008 
6009 	local_unlock(&s->cpu_sheaves->lock);
6010 
6011 	stat_add(s, FREE_FASTPATH, batch);
6012 
6013 	if (batch < size) {
6014 		p += batch;
6015 		size -= batch;
6016 		goto next_batch;
6017 	}
6018 
6019 	if (remote_nr)
6020 		goto flush_remote;
6021 
6022 	return;
6023 
6024 no_empty:
6025 	local_unlock(&s->cpu_sheaves->lock);
6026 
6027 	/*
6028 	 * if we depleted all empty sheaves in the barn or there are too
6029 	 * many full sheaves, free the rest to slab pages
6030 	 */
6031 fallback:
6032 	__kmem_cache_free_bulk(s, size, p);
6033 	stat_add(s, FREE_SLOWPATH, size);
6034 
6035 flush_remote:
6036 	if (remote_nr) {
6037 		__kmem_cache_free_bulk(s, remote_nr, &remote_objects[0]);
6038 		stat_add(s, FREE_SLOWPATH, remote_nr);
6039 		if (i < size) {
6040 			remote_nr = 0;
6041 			goto next_remote_batch;
6042 		}
6043 	}
6044 }
6045 
6046 struct defer_free {
6047 	struct llist_head objects;
6048 	struct irq_work work;
6049 };
6050 
6051 static void free_deferred_objects(struct irq_work *work);
6052 
6053 static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = {
6054 	.objects = LLIST_HEAD_INIT(objects),
6055 	.work = IRQ_WORK_INIT(free_deferred_objects),
6056 };
6057 
6058 /*
6059  * In PREEMPT_RT irq_work runs in per-cpu kthread, so it's safe
6060  * to take sleeping spin_locks from __slab_free().
6061  * In !PREEMPT_RT irq_work will run after local_unlock_irqrestore().
6062  */
6063 static void free_deferred_objects(struct irq_work *work)
6064 {
6065 	struct defer_free *df = container_of(work, struct defer_free, work);
6066 	struct llist_head *objs = &df->objects;
6067 	struct llist_node *llnode, *pos, *t;
6068 
6069 	if (llist_empty(objs))
6070 		return;
6071 
6072 	llnode = llist_del_all(objs);
6073 	llist_for_each_safe(pos, t, llnode) {
6074 		struct kmem_cache *s;
6075 		struct slab *slab;
6076 		void *x = pos;
6077 
6078 		slab = virt_to_slab(x);
6079 		s = slab->slab_cache;
6080 
6081 		/* Point 'x' back to the beginning of allocated object */
6082 		x -= s->offset;
6083 
6084 		/*
6085 		 * We used freepointer in 'x' to link 'x' into df->objects.
6086 		 * Clear it to NULL to avoid false positive detection
6087 		 * of "Freepointer corruption".
6088 		 */
6089 		set_freepointer(s, x, NULL);
6090 
6091 		__slab_free(s, slab, x, x, 1, _THIS_IP_);
6092 		stat(s, FREE_SLOWPATH);
6093 	}
6094 }
6095 
6096 static void defer_free(struct kmem_cache *s, void *head)
6097 {
6098 	struct defer_free *df;
6099 
6100 	guard(preempt)();
6101 
6102 	head = kasan_reset_tag(head);
6103 
6104 	df = this_cpu_ptr(&defer_free_objects);
6105 	if (llist_add(head + s->offset, &df->objects))
6106 		irq_work_queue(&df->work);
6107 }
6108 
6109 void defer_free_barrier(void)
6110 {
6111 	int cpu;
6112 
6113 	for_each_possible_cpu(cpu)
6114 		irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
6115 }
6116 
6117 static __fastpath_inline
6118 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
6119 	       unsigned long addr)
6120 {
6121 	memcg_slab_free_hook(s, slab, &object, 1);
6122 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
6123 
6124 	if (unlikely(!slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6125 		return;
6126 
6127 	if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())
6128 	    && likely(!slab_test_pfmemalloc(slab))) {
6129 		if (likely(free_to_pcs(s, object, true)))
6130 			return;
6131 	}
6132 
6133 	__slab_free(s, slab, object, object, 1, addr);
6134 	stat(s, FREE_SLOWPATH);
6135 }
6136 
6137 #ifdef CONFIG_MEMCG
6138 /* Do not inline the rare memcg charging failed path into the allocation path */
6139 static noinline
6140 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
6141 {
6142 	struct slab *slab = virt_to_slab(object);
6143 
6144 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
6145 
6146 	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6147 		__slab_free(s, slab, object, object, 1, _RET_IP_);
6148 }
6149 #endif
6150 
6151 static __fastpath_inline
6152 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
6153 		    void *tail, void **p, int cnt, unsigned long addr)
6154 {
6155 	memcg_slab_free_hook(s, slab, p, cnt);
6156 	alloc_tagging_slab_free_hook(s, slab, p, cnt);
6157 	/*
6158 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
6159 	 * to remove objects, whose reuse must be delayed.
6160 	 */
6161 	if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) {
6162 		__slab_free(s, slab, head, tail, cnt, addr);
6163 		stat_add(s, FREE_SLOWPATH, cnt);
6164 	}
6165 }
6166 
6167 #ifdef CONFIG_SLUB_RCU_DEBUG
6168 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
6169 {
6170 	struct rcu_delayed_free *delayed_free =
6171 			container_of(rcu_head, struct rcu_delayed_free, head);
6172 	void *object = delayed_free->object;
6173 	struct slab *slab = virt_to_slab(object);
6174 	struct kmem_cache *s;
6175 
6176 	kfree(delayed_free);
6177 
6178 	if (WARN_ON(is_kfence_address(object)))
6179 		return;
6180 
6181 	/* find the object and the cache again */
6182 	if (WARN_ON(!slab))
6183 		return;
6184 	s = slab->slab_cache;
6185 	if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
6186 		return;
6187 
6188 	/* resume freeing */
6189 	if (slab_free_hook(s, object, slab_want_init_on_free(s), true)) {
6190 		__slab_free(s, slab, object, object, 1, _THIS_IP_);
6191 		stat(s, FREE_SLOWPATH);
6192 	}
6193 }
6194 #endif /* CONFIG_SLUB_RCU_DEBUG */
6195 
6196 #ifdef CONFIG_KASAN_GENERIC
6197 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
6198 {
6199 	__slab_free(cache, virt_to_slab(x), x, x, 1, addr);
6200 	stat(cache, FREE_SLOWPATH);
6201 }
6202 #endif
6203 
6204 static noinline void warn_free_bad_obj(struct kmem_cache *s, void *obj)
6205 {
6206 	struct kmem_cache *cachep;
6207 	struct slab *slab;
6208 
6209 	slab = virt_to_slab(obj);
6210 	if (WARN_ONCE(!slab,
6211 			"kmem_cache_free(%s, %p): object is not in a slab page\n",
6212 			s->name, obj))
6213 		return;
6214 
6215 	cachep = slab->slab_cache;
6216 
6217 	if (WARN_ONCE(cachep != s,
6218 			"kmem_cache_free(%s, %p): object belongs to different cache %s\n",
6219 			s->name, obj, cachep ? cachep->name : "(NULL)")) {
6220 		if (cachep)
6221 			print_tracking(cachep, obj);
6222 		return;
6223 	}
6224 }
6225 
6226 /**
6227  * kmem_cache_free - Deallocate an object
6228  * @s: The cache the allocation was from.
6229  * @x: The previously allocated object.
6230  *
6231  * Free an object which was previously allocated from this
6232  * cache.
6233  */
6234 void kmem_cache_free(struct kmem_cache *s, void *x)
6235 {
6236 	struct slab *slab;
6237 
6238 	slab = virt_to_slab(x);
6239 
6240 	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) ||
6241 	    kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
6242 
6243 		/*
6244 		 * Intentionally leak the object in these cases, because it
6245 		 * would be too dangerous to continue.
6246 		 */
6247 		if (unlikely(!slab || (slab->slab_cache != s))) {
6248 			warn_free_bad_obj(s, x);
6249 			return;
6250 		}
6251 	}
6252 
6253 	trace_kmem_cache_free(_RET_IP_, x, s);
6254 	slab_free(s, slab, x, _RET_IP_);
6255 }
6256 EXPORT_SYMBOL(kmem_cache_free);
6257 
6258 static inline size_t slab_ksize(struct slab *slab)
6259 {
6260 	struct kmem_cache *s = slab->slab_cache;
6261 
6262 #ifdef CONFIG_SLUB_DEBUG
6263 	/*
6264 	 * Debugging requires use of the padding between object
6265 	 * and whatever may come after it.
6266 	 */
6267 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
6268 		return s->object_size;
6269 #endif
6270 	if (s->flags & SLAB_KASAN)
6271 		return s->object_size;
6272 	/*
6273 	 * If we have the need to store the freelist pointer
6274 	 * or any other metadata back there then we can
6275 	 * only use the space before that information.
6276 	 */
6277 	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
6278 		return s->inuse;
6279 	else if (obj_exts_in_object(s, slab))
6280 		return s->inuse;
6281 	/*
6282 	 * Else we can use all the padding etc for the allocation
6283 	 */
6284 	return s->size;
6285 }
6286 
6287 static size_t __ksize(const void *object)
6288 {
6289 	struct page *page;
6290 	struct slab *slab;
6291 
6292 	if (unlikely(object == ZERO_SIZE_PTR))
6293 		return 0;
6294 
6295 	page = virt_to_page(object);
6296 
6297 	if (unlikely(PageLargeKmalloc(page)))
6298 		return large_kmalloc_size(page);
6299 
6300 	slab = page_slab(page);
6301 	/* Delete this after we're sure there are no users */
6302 	if (WARN_ON(!slab))
6303 		return page_size(page);
6304 
6305 #ifdef CONFIG_SLUB_DEBUG
6306 	skip_orig_size_check(slab->slab_cache, object);
6307 #endif
6308 
6309 	return slab_ksize(slab);
6310 }
6311 
6312 /**
6313  * ksize -- Report full size of underlying allocation
6314  * @objp: pointer to the object
6315  *
6316  * This should only be used internally to query the true size of allocations.
6317  * It is not meant to be a way to discover the usable size of an allocation
6318  * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
6319  * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
6320  * and/or FORTIFY_SOURCE.
6321  *
6322  * Return: size of the actual memory used by @objp in bytes
6323  */
6324 size_t ksize(const void *objp)
6325 {
6326 	/*
6327 	 * We need to first check that the pointer to the object is valid.
6328 	 * The KASAN report printed from ksize() is more useful, then when
6329 	 * it's printed later when the behaviour could be undefined due to
6330 	 * a potential use-after-free or double-free.
6331 	 *
6332 	 * We use kasan_check_byte(), which is supported for the hardware
6333 	 * tag-based KASAN mode, unlike kasan_check_read/write().
6334 	 *
6335 	 * If the pointed to memory is invalid, we return 0 to avoid users of
6336 	 * ksize() writing to and potentially corrupting the memory region.
6337 	 *
6338 	 * We want to perform the check before __ksize(), to avoid potentially
6339 	 * crashing in __ksize() due to accessing invalid metadata.
6340 	 */
6341 	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
6342 		return 0;
6343 
6344 	return kfence_ksize(objp) ?: __ksize(objp);
6345 }
6346 EXPORT_SYMBOL(ksize);
6347 
6348 static void free_large_kmalloc(struct page *page, void *object)
6349 {
6350 	unsigned int order = compound_order(page);
6351 
6352 	if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
6353 		dump_page(page, "Not a kmalloc allocation");
6354 		return;
6355 	}
6356 
6357 	if (WARN_ON_ONCE(order == 0))
6358 		pr_warn_once("object pointer: 0x%p\n", object);
6359 
6360 	kmemleak_free(object);
6361 	kasan_kfree_large(object);
6362 	kmsan_kfree_large(object);
6363 
6364 	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
6365 			      -(PAGE_SIZE << order));
6366 	__ClearPageLargeKmalloc(page);
6367 	free_frozen_pages(page, order);
6368 }
6369 
6370 /*
6371  * Given an rcu_head embedded within an object obtained from kvmalloc at an
6372  * offset < 4k, free the object in question.
6373  */
6374 void kvfree_rcu_cb(struct rcu_head *head)
6375 {
6376 	void *obj = head;
6377 	struct page *page;
6378 	struct slab *slab;
6379 	struct kmem_cache *s;
6380 	void *slab_addr;
6381 
6382 	if (is_vmalloc_addr(obj)) {
6383 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6384 		vfree(obj);
6385 		return;
6386 	}
6387 
6388 	page = virt_to_page(obj);
6389 	slab = page_slab(page);
6390 	if (!slab) {
6391 		/*
6392 		 * rcu_head offset can be only less than page size so no need to
6393 		 * consider allocation order
6394 		 */
6395 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6396 		free_large_kmalloc(page, obj);
6397 		return;
6398 	}
6399 
6400 	s = slab->slab_cache;
6401 	slab_addr = slab_address(slab);
6402 
6403 	if (is_kfence_address(obj)) {
6404 		obj = kfence_object_start(obj);
6405 	} else {
6406 		unsigned int idx = __obj_to_index(s, slab_addr, obj);
6407 
6408 		obj = slab_addr + s->size * idx;
6409 		obj = fixup_red_left(s, obj);
6410 	}
6411 
6412 	slab_free(s, slab, obj, _RET_IP_);
6413 }
6414 
6415 /**
6416  * kfree - free previously allocated memory
6417  * @object: pointer returned by kmalloc(), kmalloc_nolock(), or kmem_cache_alloc()
6418  *
6419  * If @object is NULL, no operation is performed.
6420  */
6421 void kfree(const void *object)
6422 {
6423 	struct page *page;
6424 	struct slab *slab;
6425 	struct kmem_cache *s;
6426 	void *x = (void *)object;
6427 
6428 	trace_kfree(_RET_IP_, object);
6429 
6430 	if (unlikely(ZERO_OR_NULL_PTR(object)))
6431 		return;
6432 
6433 	page = virt_to_page(object);
6434 	slab = page_slab(page);
6435 	if (!slab) {
6436 		/* kmalloc_nolock() doesn't support large kmalloc */
6437 		free_large_kmalloc(page, (void *)object);
6438 		return;
6439 	}
6440 
6441 	s = slab->slab_cache;
6442 	slab_free(s, slab, x, _RET_IP_);
6443 }
6444 EXPORT_SYMBOL(kfree);
6445 
6446 /*
6447  * Can be called while holding raw_spinlock_t or from IRQ and NMI,
6448  * but ONLY for objects allocated by kmalloc_nolock().
6449  * Debug checks (like kmemleak and kfence) were skipped on allocation,
6450  * hence
6451  * obj = kmalloc(); kfree_nolock(obj);
6452  * will miss kmemleak/kfence book keeping and will cause false positives.
6453  * large_kmalloc is not supported either.
6454  */
6455 void kfree_nolock(const void *object)
6456 {
6457 	struct slab *slab;
6458 	struct kmem_cache *s;
6459 	void *x = (void *)object;
6460 
6461 	if (unlikely(ZERO_OR_NULL_PTR(object)))
6462 		return;
6463 
6464 	slab = virt_to_slab(object);
6465 	if (unlikely(!slab)) {
6466 		WARN_ONCE(1, "large_kmalloc is not supported by kfree_nolock()");
6467 		return;
6468 	}
6469 
6470 	s = slab->slab_cache;
6471 
6472 	memcg_slab_free_hook(s, slab, &x, 1);
6473 	alloc_tagging_slab_free_hook(s, slab, &x, 1);
6474 	/*
6475 	 * Unlike slab_free() do NOT call the following:
6476 	 * kmemleak_free_recursive(x, s->flags);
6477 	 * debug_check_no_locks_freed(x, s->object_size);
6478 	 * debug_check_no_obj_freed(x, s->object_size);
6479 	 * __kcsan_check_access(x, s->object_size, ..);
6480 	 * kfence_free(x);
6481 	 * since they take spinlocks or not safe from any context.
6482 	 */
6483 	kmsan_slab_free(s, x);
6484 	/*
6485 	 * If KASAN finds a kernel bug it will do kasan_report_invalid_free()
6486 	 * which will call raw_spin_lock_irqsave() which is technically
6487 	 * unsafe from NMI, but take chance and report kernel bug.
6488 	 * The sequence of
6489 	 * kasan_report_invalid_free() -> raw_spin_lock_irqsave() -> NMI
6490 	 *  -> kfree_nolock() -> kasan_report_invalid_free() on the same CPU
6491 	 * is double buggy and deserves to deadlock.
6492 	 */
6493 	if (kasan_slab_pre_free(s, x))
6494 		return;
6495 	/*
6496 	 * memcg, kasan_slab_pre_free are done for 'x'.
6497 	 * The only thing left is kasan_poison without quarantine,
6498 	 * since kasan quarantine takes locks and not supported from NMI.
6499 	 */
6500 	kasan_slab_free(s, x, false, false, /* skip quarantine */true);
6501 
6502 	if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())) {
6503 		if (likely(free_to_pcs(s, x, false)))
6504 			return;
6505 	}
6506 
6507 	/*
6508 	 * __slab_free() can locklessly cmpxchg16 into a slab, but then it might
6509 	 * need to take spin_lock for further processing.
6510 	 * Avoid the complexity and simply add to a deferred list.
6511 	 */
6512 	defer_free(s, x);
6513 }
6514 EXPORT_SYMBOL_GPL(kfree_nolock);
6515 
6516 static __always_inline __realloc_size(2) void *
6517 __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags, int nid)
6518 {
6519 	void *ret;
6520 	size_t ks = 0;
6521 	int orig_size = 0;
6522 	struct kmem_cache *s = NULL;
6523 
6524 	if (unlikely(ZERO_OR_NULL_PTR(p)))
6525 		goto alloc_new;
6526 
6527 	/* Check for double-free. */
6528 	if (!kasan_check_byte(p))
6529 		return NULL;
6530 
6531 	/*
6532 	 * If reallocation is not necessary (e. g. the new size is less
6533 	 * than the current allocated size), the current allocation will be
6534 	 * preserved unless __GFP_THISNODE is set. In the latter case a new
6535 	 * allocation on the requested node will be attempted.
6536 	 */
6537 	if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
6538 		     nid != page_to_nid(virt_to_page(p)))
6539 		goto alloc_new;
6540 
6541 	if (is_kfence_address(p)) {
6542 		ks = orig_size = kfence_ksize(p);
6543 	} else {
6544 		struct page *page = virt_to_page(p);
6545 		struct slab *slab = page_slab(page);
6546 
6547 		if (!slab) {
6548 			/* Big kmalloc object */
6549 			ks = page_size(page);
6550 			WARN_ON(ks <= KMALLOC_MAX_CACHE_SIZE);
6551 			WARN_ON(p != page_address(page));
6552 		} else {
6553 			s = slab->slab_cache;
6554 			orig_size = get_orig_size(s, (void *)p);
6555 			ks = s->object_size;
6556 		}
6557 	}
6558 
6559 	/* If the old object doesn't fit, allocate a bigger one */
6560 	if (new_size > ks)
6561 		goto alloc_new;
6562 
6563 	/* If the old object doesn't satisfy the new alignment, allocate a new one */
6564 	if (!IS_ALIGNED((unsigned long)p, align))
6565 		goto alloc_new;
6566 
6567 	/* Zero out spare memory. */
6568 	if (want_init_on_alloc(flags)) {
6569 		kasan_disable_current();
6570 		if (orig_size && orig_size < new_size)
6571 			memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size);
6572 		else
6573 			memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
6574 		kasan_enable_current();
6575 	}
6576 
6577 	/* Setup kmalloc redzone when needed */
6578 	if (s && slub_debug_orig_size(s)) {
6579 		set_orig_size(s, (void *)p, new_size);
6580 		if (s->flags & SLAB_RED_ZONE && new_size < ks)
6581 			memset_no_sanitize_memory(kasan_reset_tag(p) + new_size,
6582 						SLUB_RED_ACTIVE, ks - new_size);
6583 	}
6584 
6585 	p = kasan_krealloc(p, new_size, flags);
6586 	return (void *)p;
6587 
6588 alloc_new:
6589 	ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
6590 	if (ret && p) {
6591 		/* Disable KASAN checks as the object's redzone is accessed. */
6592 		kasan_disable_current();
6593 		memcpy(ret, kasan_reset_tag(p), orig_size ?: ks);
6594 		kasan_enable_current();
6595 	}
6596 
6597 	return ret;
6598 }
6599 
6600 /**
6601  * krealloc_node_align - reallocate memory. The contents will remain unchanged.
6602  * @p: object to reallocate memory for.
6603  * @new_size: how many bytes of memory are required.
6604  * @align: desired alignment.
6605  * @flags: the type of memory to allocate.
6606  * @nid: NUMA node or NUMA_NO_NODE
6607  *
6608  * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
6609  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
6610  *
6611  * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6612  * Documentation/core-api/memory-allocation.rst for more details.
6613  *
6614  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
6615  * initial memory allocation, every subsequent call to this API for the same
6616  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
6617  * __GFP_ZERO is not fully honored by this API.
6618  *
6619  * When slub_debug_orig_size() is off, krealloc() only knows about the bucket
6620  * size of an allocation (but not the exact size it was allocated with) and
6621  * hence implements the following semantics for shrinking and growing buffers
6622  * with __GFP_ZERO::
6623  *
6624  *           new             bucket
6625  *   0       size             size
6626  *   |--------|----------------|
6627  *   |  keep  |      zero      |
6628  *
6629  * Otherwise, the original allocation size 'orig_size' could be used to
6630  * precisely clear the requested size, and the new size will also be stored
6631  * as the new 'orig_size'.
6632  *
6633  * In any case, the contents of the object pointed to are preserved up to the
6634  * lesser of the new and old sizes.
6635  *
6636  * Return: pointer to the allocated memory or %NULL in case of error
6637  */
6638 void *krealloc_node_align_noprof(const void *p, size_t new_size, unsigned long align,
6639 				 gfp_t flags, int nid)
6640 {
6641 	void *ret;
6642 
6643 	if (unlikely(!new_size)) {
6644 		kfree(p);
6645 		return ZERO_SIZE_PTR;
6646 	}
6647 
6648 	ret = __do_krealloc(p, new_size, align, flags, nid);
6649 	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
6650 		kfree(p);
6651 
6652 	return ret;
6653 }
6654 EXPORT_SYMBOL(krealloc_node_align_noprof);
6655 
6656 static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
6657 {
6658 	/*
6659 	 * We want to attempt a large physically contiguous block first because
6660 	 * it is less likely to fragment multiple larger blocks and therefore
6661 	 * contribute to a long term fragmentation less than vmalloc fallback.
6662 	 * However make sure that larger requests are not too disruptive - i.e.
6663 	 * do not direct reclaim unless physically continuous memory is preferred
6664 	 * (__GFP_RETRY_MAYFAIL mode). We still kick in kswapd/kcompactd to
6665 	 * start working in the background
6666 	 */
6667 	if (size > PAGE_SIZE) {
6668 		flags |= __GFP_NOWARN;
6669 
6670 		if (!(flags & __GFP_RETRY_MAYFAIL))
6671 			flags &= ~__GFP_DIRECT_RECLAIM;
6672 
6673 		/* nofail semantic is implemented by the vmalloc fallback */
6674 		flags &= ~__GFP_NOFAIL;
6675 	}
6676 
6677 	return flags;
6678 }
6679 
6680 /**
6681  * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
6682  * failure, fall back to non-contiguous (vmalloc) allocation.
6683  * @size: size of the request.
6684  * @b: which set of kmalloc buckets to allocate from.
6685  * @align: desired alignment.
6686  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
6687  * @node: numa node to allocate from
6688  *
6689  * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6690  * Documentation/core-api/memory-allocation.rst for more details.
6691  *
6692  * Uses kmalloc to get the memory but if the allocation fails then falls back
6693  * to the vmalloc allocator. Use kvfree for freeing the memory.
6694  *
6695  * GFP_NOWAIT and GFP_ATOMIC are supported, the __GFP_NORETRY modifier is not.
6696  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
6697  * preferable to the vmalloc fallback, due to visible performance drawbacks.
6698  *
6699  * Return: pointer to the allocated memory of %NULL in case of failure
6700  */
6701 void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
6702 			     gfp_t flags, int node)
6703 {
6704 	bool allow_block;
6705 	void *ret;
6706 
6707 	/*
6708 	 * It doesn't really make sense to fallback to vmalloc for sub page
6709 	 * requests
6710 	 */
6711 	ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),
6712 				kmalloc_gfp_adjust(flags, size),
6713 				node, _RET_IP_);
6714 	if (ret || size <= PAGE_SIZE)
6715 		return ret;
6716 
6717 	/* Don't even allow crazy sizes */
6718 	if (unlikely(size > INT_MAX)) {
6719 		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
6720 		return NULL;
6721 	}
6722 
6723 	/*
6724 	 * For non-blocking the VM_ALLOW_HUGE_VMAP is not used
6725 	 * because the huge-mapping path in vmalloc contains at
6726 	 * least one might_sleep() call.
6727 	 *
6728 	 * TODO: Revise huge-mapping path to support non-blocking
6729 	 * flags.
6730 	 */
6731 	allow_block = gfpflags_allow_blocking(flags);
6732 
6733 	/*
6734 	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
6735 	 * since the callers already cannot assume anything
6736 	 * about the resulting pointer, and cannot play
6737 	 * protection games.
6738 	 */
6739 	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
6740 			flags, PAGE_KERNEL, allow_block ? VM_ALLOW_HUGE_VMAP:0,
6741 			node, __builtin_return_address(0));
6742 }
6743 EXPORT_SYMBOL(__kvmalloc_node_noprof);
6744 
6745 /**
6746  * kvfree() - Free memory.
6747  * @addr: Pointer to allocated memory.
6748  *
6749  * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
6750  * It is slightly more efficient to use kfree() or vfree() if you are certain
6751  * that you know which one to use.
6752  *
6753  * Context: Either preemptible task context or not-NMI interrupt.
6754  */
6755 void kvfree(const void *addr)
6756 {
6757 	if (is_vmalloc_addr(addr))
6758 		vfree(addr);
6759 	else
6760 		kfree(addr);
6761 }
6762 EXPORT_SYMBOL(kvfree);
6763 
6764 /**
6765  * kvfree_sensitive - Free a data object containing sensitive information.
6766  * @addr: address of the data object to be freed.
6767  * @len: length of the data object.
6768  *
6769  * Use the special memzero_explicit() function to clear the content of a
6770  * kvmalloc'ed object containing sensitive data to make sure that the
6771  * compiler won't optimize out the data clearing.
6772  */
6773 void kvfree_sensitive(const void *addr, size_t len)
6774 {
6775 	if (likely(!ZERO_OR_NULL_PTR(addr))) {
6776 		memzero_explicit((void *)addr, len);
6777 		kvfree(addr);
6778 	}
6779 }
6780 EXPORT_SYMBOL(kvfree_sensitive);
6781 
6782 /**
6783  * kvrealloc_node_align - reallocate memory; contents remain unchanged
6784  * @p: object to reallocate memory for
6785  * @size: the size to reallocate
6786  * @align: desired alignment
6787  * @flags: the flags for the page level allocator
6788  * @nid: NUMA node id
6789  *
6790  * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
6791  * and @p is not a %NULL pointer, the object pointed to is freed.
6792  *
6793  * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6794  * Documentation/core-api/memory-allocation.rst for more details.
6795  *
6796  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
6797  * initial memory allocation, every subsequent call to this API for the same
6798  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
6799  * __GFP_ZERO is not fully honored by this API.
6800  *
6801  * In any case, the contents of the object pointed to are preserved up to the
6802  * lesser of the new and old sizes.
6803  *
6804  * This function must not be called concurrently with itself or kvfree() for the
6805  * same memory allocation.
6806  *
6807  * Return: pointer to the allocated memory or %NULL in case of error
6808  */
6809 void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
6810 				  gfp_t flags, int nid)
6811 {
6812 	void *n;
6813 
6814 	if (is_vmalloc_addr(p))
6815 		return vrealloc_node_align_noprof(p, size, align, flags, nid);
6816 
6817 	n = krealloc_node_align_noprof(p, size, align, kmalloc_gfp_adjust(flags, size), nid);
6818 	if (!n) {
6819 		/* We failed to krealloc(), fall back to kvmalloc(). */
6820 		n = kvmalloc_node_align_noprof(size, align, flags, nid);
6821 		if (!n)
6822 			return NULL;
6823 
6824 		if (p) {
6825 			/* We already know that `p` is not a vmalloc address. */
6826 			kasan_disable_current();
6827 			memcpy(n, kasan_reset_tag(p), ksize(p));
6828 			kasan_enable_current();
6829 
6830 			kfree(p);
6831 		}
6832 	}
6833 
6834 	return n;
6835 }
6836 EXPORT_SYMBOL(kvrealloc_node_align_noprof);
6837 
6838 struct detached_freelist {
6839 	struct slab *slab;
6840 	void *tail;
6841 	void *freelist;
6842 	int cnt;
6843 	struct kmem_cache *s;
6844 };
6845 
6846 /*
6847  * This function progressively scans the array with free objects (with
6848  * a limited look ahead) and extract objects belonging to the same
6849  * slab.  It builds a detached freelist directly within the given
6850  * slab/objects.  This can happen without any need for
6851  * synchronization, because the objects are owned by running process.
6852  * The freelist is build up as a single linked list in the objects.
6853  * The idea is, that this detached freelist can then be bulk
6854  * transferred to the real freelist(s), but only requiring a single
6855  * synchronization primitive.  Look ahead in the array is limited due
6856  * to performance reasons.
6857  */
6858 static inline
6859 int build_detached_freelist(struct kmem_cache *s, size_t size,
6860 			    void **p, struct detached_freelist *df)
6861 {
6862 	int lookahead = 3;
6863 	void *object;
6864 	struct page *page;
6865 	struct slab *slab;
6866 	size_t same;
6867 
6868 	object = p[--size];
6869 	page = virt_to_page(object);
6870 	slab = page_slab(page);
6871 	if (!s) {
6872 		/* Handle kalloc'ed objects */
6873 		if (!slab) {
6874 			free_large_kmalloc(page, object);
6875 			df->slab = NULL;
6876 			return size;
6877 		}
6878 		/* Derive kmem_cache from object */
6879 		df->slab = slab;
6880 		df->s = slab->slab_cache;
6881 	} else {
6882 		df->slab = slab;
6883 		df->s = s;
6884 	}
6885 
6886 	/* Start new detached freelist */
6887 	df->tail = object;
6888 	df->freelist = object;
6889 	df->cnt = 1;
6890 
6891 	if (is_kfence_address(object))
6892 		return size;
6893 
6894 	set_freepointer(df->s, object, NULL);
6895 
6896 	same = size;
6897 	while (size) {
6898 		object = p[--size];
6899 		/* df->slab is always set at this point */
6900 		if (df->slab == virt_to_slab(object)) {
6901 			/* Opportunity build freelist */
6902 			set_freepointer(df->s, object, df->freelist);
6903 			df->freelist = object;
6904 			df->cnt++;
6905 			same--;
6906 			if (size != same)
6907 				swap(p[size], p[same]);
6908 			continue;
6909 		}
6910 
6911 		/* Limit look ahead search */
6912 		if (!--lookahead)
6913 			break;
6914 	}
6915 
6916 	return same;
6917 }
6918 
6919 /*
6920  * Internal bulk free of objects that were not initialised by the post alloc
6921  * hooks and thus should not be processed by the free hooks
6922  */
6923 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
6924 {
6925 	if (!size)
6926 		return;
6927 
6928 	do {
6929 		struct detached_freelist df;
6930 
6931 		size = build_detached_freelist(s, size, p, &df);
6932 		if (!df.slab)
6933 			continue;
6934 
6935 		if (kfence_free(df.freelist))
6936 			continue;
6937 
6938 		__slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
6939 			     _RET_IP_);
6940 	} while (likely(size));
6941 }
6942 
6943 /* Note that interrupts must be enabled when calling this function. */
6944 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
6945 {
6946 	if (!size)
6947 		return;
6948 
6949 	/*
6950 	 * freeing to sheaves is so incompatible with the detached freelist so
6951 	 * once we go that way, we have to do everything differently
6952 	 */
6953 	if (s && cache_has_sheaves(s)) {
6954 		free_to_pcs_bulk(s, size, p);
6955 		return;
6956 	}
6957 
6958 	do {
6959 		struct detached_freelist df;
6960 
6961 		size = build_detached_freelist(s, size, p, &df);
6962 		if (!df.slab)
6963 			continue;
6964 
6965 		slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
6966 			       df.cnt, _RET_IP_);
6967 	} while (likely(size));
6968 }
6969 EXPORT_SYMBOL(kmem_cache_free_bulk);
6970 
6971 static unsigned int
6972 __refill_objects_node(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
6973 		      unsigned int max, struct kmem_cache_node *n,
6974 		      bool allow_spin)
6975 {
6976 	struct partial_bulk_context pc;
6977 	struct slab *slab, *slab2;
6978 	unsigned int refilled = 0;
6979 	unsigned long flags;
6980 	void *object;
6981 
6982 	pc.flags = gfp;
6983 	pc.min_objects = min;
6984 	pc.max_objects = max;
6985 
6986 	if (!get_partial_node_bulk(s, n, &pc, allow_spin))
6987 		return 0;
6988 
6989 	list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
6990 
6991 		list_del(&slab->slab_list);
6992 
6993 		object = get_freelist_nofreeze(s, slab);
6994 
6995 		while (object && refilled < max) {
6996 			p[refilled] = object;
6997 			object = get_freepointer(s, object);
6998 			maybe_wipe_obj_freeptr(s, p[refilled]);
6999 
7000 			refilled++;
7001 		}
7002 
7003 		/*
7004 		 * Freelist had more objects than we can accommodate, we need to
7005 		 * free them back. We can treat it like a detached freelist, just
7006 		 * need to find the tail object.
7007 		 */
7008 		if (unlikely(object)) {
7009 			void *head = object;
7010 			void *tail;
7011 			int cnt = 0;
7012 
7013 			do {
7014 				tail = object;
7015 				cnt++;
7016 				object = get_freepointer(s, object);
7017 			} while (object);
7018 			__slab_free(s, slab, head, tail, cnt, _RET_IP_);
7019 		}
7020 
7021 		if (refilled >= max)
7022 			break;
7023 	}
7024 
7025 	if (unlikely(!list_empty(&pc.slabs))) {
7026 		spin_lock_irqsave(&n->list_lock, flags);
7027 
7028 		list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7029 
7030 			if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial))
7031 				continue;
7032 
7033 			list_del(&slab->slab_list);
7034 			add_partial(n, slab, ADD_TO_HEAD);
7035 		}
7036 
7037 		spin_unlock_irqrestore(&n->list_lock, flags);
7038 
7039 		/* any slabs left are completely free and for discard */
7040 		list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7041 
7042 			list_del(&slab->slab_list);
7043 			discard_slab(s, slab);
7044 		}
7045 	}
7046 
7047 	return refilled;
7048 }
7049 
7050 #ifdef CONFIG_NUMA
7051 static unsigned int
7052 __refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7053 		     unsigned int max)
7054 {
7055 	struct zonelist *zonelist;
7056 	struct zoneref *z;
7057 	struct zone *zone;
7058 	enum zone_type highest_zoneidx = gfp_zone(gfp);
7059 	unsigned int cpuset_mems_cookie;
7060 	unsigned int refilled = 0;
7061 
7062 	/* see get_from_any_partial() for the defrag ratio description */
7063 	if (!s->remote_node_defrag_ratio ||
7064 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
7065 		return 0;
7066 
7067 	do {
7068 		cpuset_mems_cookie = read_mems_allowed_begin();
7069 		zonelist = node_zonelist(mempolicy_slab_node(), gfp);
7070 		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
7071 			struct kmem_cache_node *n;
7072 			unsigned int r;
7073 
7074 			n = get_node(s, zone_to_nid(zone));
7075 
7076 			if (!n || !cpuset_zone_allowed(zone, gfp) ||
7077 					n->nr_partial <= s->min_partial)
7078 				continue;
7079 
7080 			r = __refill_objects_node(s, p, gfp, min, max, n,
7081 						  /* allow_spin = */ false);
7082 			refilled += r;
7083 
7084 			if (r >= min) {
7085 				/*
7086 				 * Don't check read_mems_allowed_retry() here -
7087 				 * if mems_allowed was updated in parallel, that
7088 				 * was a harmless race between allocation and
7089 				 * the cpuset update
7090 				 */
7091 				return refilled;
7092 			}
7093 			p += r;
7094 			min -= r;
7095 			max -= r;
7096 		}
7097 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
7098 
7099 	return refilled;
7100 }
7101 #else
7102 static inline unsigned int
7103 __refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7104 		     unsigned int max)
7105 {
7106 	return 0;
7107 }
7108 #endif
7109 
7110 static unsigned int
7111 refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7112 	       unsigned int max)
7113 {
7114 	int local_node = numa_mem_id();
7115 	unsigned int refilled;
7116 	struct slab *slab;
7117 
7118 	if (WARN_ON_ONCE(!gfpflags_allow_spinning(gfp)))
7119 		return 0;
7120 
7121 	refilled = __refill_objects_node(s, p, gfp, min, max,
7122 					 get_node(s, local_node),
7123 					 /* allow_spin = */ true);
7124 	if (refilled >= min)
7125 		return refilled;
7126 
7127 	refilled += __refill_objects_any(s, p + refilled, gfp, min - refilled,
7128 					 max - refilled);
7129 	if (refilled >= min)
7130 		return refilled;
7131 
7132 new_slab:
7133 
7134 	slab = new_slab(s, gfp, local_node);
7135 	if (!slab)
7136 		goto out;
7137 
7138 	stat(s, ALLOC_SLAB);
7139 
7140 	/*
7141 	 * TODO: possible optimization - if we know we will consume the whole
7142 	 * slab we might skip creating the freelist?
7143 	 */
7144 	refilled += alloc_from_new_slab(s, slab, p + refilled, max - refilled,
7145 					/* allow_spin = */ true);
7146 
7147 	if (refilled < min)
7148 		goto new_slab;
7149 
7150 out:
7151 	return refilled;
7152 }
7153 
7154 static inline
7155 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
7156 			    void **p)
7157 {
7158 	int i;
7159 
7160 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
7161 		for (i = 0; i < size; i++) {
7162 
7163 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_,
7164 					     s->object_size);
7165 			if (unlikely(!p[i]))
7166 				goto error;
7167 
7168 			maybe_wipe_obj_freeptr(s, p[i]);
7169 		}
7170 	} else {
7171 		i = refill_objects(s, p, flags, size, size);
7172 		if (i < size)
7173 			goto error;
7174 		stat_add(s, ALLOC_SLOWPATH, i);
7175 	}
7176 
7177 	return i;
7178 
7179 error:
7180 	__kmem_cache_free_bulk(s, i, p);
7181 	return 0;
7182 
7183 }
7184 
7185 /*
7186  * Note that interrupts must be enabled when calling this function and gfp
7187  * flags must allow spinning.
7188  */
7189 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
7190 				 void **p)
7191 {
7192 	unsigned int i = 0;
7193 	void *kfence_obj;
7194 
7195 	if (!size)
7196 		return 0;
7197 
7198 	s = slab_pre_alloc_hook(s, flags);
7199 	if (unlikely(!s))
7200 		return 0;
7201 
7202 	/*
7203 	 * to make things simpler, only assume at most once kfence allocated
7204 	 * object per bulk allocation and choose its index randomly
7205 	 */
7206 	kfence_obj = kfence_alloc(s, s->object_size, flags);
7207 
7208 	if (unlikely(kfence_obj)) {
7209 		if (unlikely(size == 1)) {
7210 			p[0] = kfence_obj;
7211 			goto out;
7212 		}
7213 		size--;
7214 	}
7215 
7216 	i = alloc_from_pcs_bulk(s, flags, size, p);
7217 
7218 	if (i < size) {
7219 		/*
7220 		 * If we ran out of memory, don't bother with freeing back to
7221 		 * the percpu sheaves, we have bigger problems.
7222 		 */
7223 		if (unlikely(__kmem_cache_alloc_bulk(s, flags, size - i, p + i) == 0)) {
7224 			if (i > 0)
7225 				__kmem_cache_free_bulk(s, i, p);
7226 			if (kfence_obj)
7227 				__kfence_free(kfence_obj);
7228 			return 0;
7229 		}
7230 	}
7231 
7232 	if (unlikely(kfence_obj)) {
7233 		int idx = get_random_u32_below(size + 1);
7234 
7235 		if (idx != size)
7236 			p[size] = p[idx];
7237 		p[idx] = kfence_obj;
7238 
7239 		size++;
7240 	}
7241 
7242 out:
7243 	/*
7244 	 * memcg and kmem_cache debug support and memory initialization.
7245 	 * Done outside of the IRQ disabled fastpath loop.
7246 	 */
7247 	if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
7248 		    slab_want_init_on_alloc(flags, s), s->object_size))) {
7249 		return 0;
7250 	}
7251 
7252 	return size;
7253 }
7254 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
7255 
7256 /*
7257  * Object placement in a slab is made very easy because we always start at
7258  * offset 0. If we tune the size of the object to the alignment then we can
7259  * get the required alignment by putting one properly sized object after
7260  * another.
7261  *
7262  * Notice that the allocation order determines the sizes of the per cpu
7263  * caches. Each processor has always one slab available for allocations.
7264  * Increasing the allocation order reduces the number of times that slabs
7265  * must be moved on and off the partial lists and is therefore a factor in
7266  * locking overhead.
7267  */
7268 
7269 /*
7270  * Minimum / Maximum order of slab pages. This influences locking overhead
7271  * and slab fragmentation. A higher order reduces the number of partial slabs
7272  * and increases the number of allocations possible without having to
7273  * take the list_lock.
7274  */
7275 static unsigned int slub_min_order;
7276 static unsigned int slub_max_order =
7277 	IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
7278 static unsigned int slub_min_objects;
7279 
7280 /*
7281  * Calculate the order of allocation given an slab object size.
7282  *
7283  * The order of allocation has significant impact on performance and other
7284  * system components. Generally order 0 allocations should be preferred since
7285  * order 0 does not cause fragmentation in the page allocator. Larger objects
7286  * be problematic to put into order 0 slabs because there may be too much
7287  * unused space left. We go to a higher order if more than 1/16th of the slab
7288  * would be wasted.
7289  *
7290  * In order to reach satisfactory performance we must ensure that a minimum
7291  * number of objects is in one slab. Otherwise we may generate too much
7292  * activity on the partial lists which requires taking the list_lock. This is
7293  * less a concern for large slabs though which are rarely used.
7294  *
7295  * slab_max_order specifies the order where we begin to stop considering the
7296  * number of objects in a slab as critical. If we reach slab_max_order then
7297  * we try to keep the page order as low as possible. So we accept more waste
7298  * of space in favor of a small page order.
7299  *
7300  * Higher order allocations also allow the placement of more objects in a
7301  * slab and thereby reduce object handling overhead. If the user has
7302  * requested a higher minimum order then we start with that one instead of
7303  * the smallest order which will fit the object.
7304  */
7305 static inline unsigned int calc_slab_order(unsigned int size,
7306 		unsigned int min_order, unsigned int max_order,
7307 		unsigned int fract_leftover)
7308 {
7309 	unsigned int order;
7310 
7311 	for (order = min_order; order <= max_order; order++) {
7312 
7313 		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
7314 		unsigned int rem;
7315 
7316 		rem = slab_size % size;
7317 
7318 		if (rem <= slab_size / fract_leftover)
7319 			break;
7320 	}
7321 
7322 	return order;
7323 }
7324 
7325 static inline int calculate_order(unsigned int size)
7326 {
7327 	unsigned int order;
7328 	unsigned int min_objects;
7329 	unsigned int max_objects;
7330 	unsigned int min_order;
7331 
7332 	min_objects = slub_min_objects;
7333 	if (!min_objects) {
7334 		/*
7335 		 * Some architectures will only update present cpus when
7336 		 * onlining them, so don't trust the number if it's just 1. But
7337 		 * we also don't want to use nr_cpu_ids always, as on some other
7338 		 * architectures, there can be many possible cpus, but never
7339 		 * onlined. Here we compromise between trying to avoid too high
7340 		 * order on systems that appear larger than they are, and too
7341 		 * low order on systems that appear smaller than they are.
7342 		 */
7343 		unsigned int nr_cpus = num_present_cpus();
7344 		if (nr_cpus <= 1)
7345 			nr_cpus = nr_cpu_ids;
7346 		min_objects = 4 * (fls(nr_cpus) + 1);
7347 	}
7348 	/* min_objects can't be 0 because get_order(0) is undefined */
7349 	max_objects = max(order_objects(slub_max_order, size), 1U);
7350 	min_objects = min(min_objects, max_objects);
7351 
7352 	min_order = max_t(unsigned int, slub_min_order,
7353 			  get_order(min_objects * size));
7354 	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
7355 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
7356 
7357 	/*
7358 	 * Attempt to find best configuration for a slab. This works by first
7359 	 * attempting to generate a layout with the best possible configuration
7360 	 * and backing off gradually.
7361 	 *
7362 	 * We start with accepting at most 1/16 waste and try to find the
7363 	 * smallest order from min_objects-derived/slab_min_order up to
7364 	 * slab_max_order that will satisfy the constraint. Note that increasing
7365 	 * the order can only result in same or less fractional waste, not more.
7366 	 *
7367 	 * If that fails, we increase the acceptable fraction of waste and try
7368 	 * again. The last iteration with fraction of 1/2 would effectively
7369 	 * accept any waste and give us the order determined by min_objects, as
7370 	 * long as at least single object fits within slab_max_order.
7371 	 */
7372 	for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
7373 		order = calc_slab_order(size, min_order, slub_max_order,
7374 					fraction);
7375 		if (order <= slub_max_order)
7376 			return order;
7377 	}
7378 
7379 	/*
7380 	 * Doh this slab cannot be placed using slab_max_order.
7381 	 */
7382 	order = get_order(size);
7383 	if (order <= MAX_PAGE_ORDER)
7384 		return order;
7385 	return -ENOSYS;
7386 }
7387 
7388 static void
7389 init_kmem_cache_node(struct kmem_cache_node *n, struct node_barn *barn)
7390 {
7391 	n->nr_partial = 0;
7392 	spin_lock_init(&n->list_lock);
7393 	INIT_LIST_HEAD(&n->partial);
7394 #ifdef CONFIG_SLUB_DEBUG
7395 	atomic_long_set(&n->nr_slabs, 0);
7396 	atomic_long_set(&n->total_objects, 0);
7397 	INIT_LIST_HEAD(&n->full);
7398 #endif
7399 	n->barn = barn;
7400 	if (barn)
7401 		barn_init(barn);
7402 }
7403 
7404 #ifdef CONFIG_SLUB_STATS
7405 static inline int alloc_kmem_cache_stats(struct kmem_cache *s)
7406 {
7407 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
7408 			NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
7409 			sizeof(struct kmem_cache_stats));
7410 
7411 	s->cpu_stats = alloc_percpu(struct kmem_cache_stats);
7412 
7413 	if (!s->cpu_stats)
7414 		return 0;
7415 
7416 	return 1;
7417 }
7418 #endif
7419 
7420 static int init_percpu_sheaves(struct kmem_cache *s)
7421 {
7422 	static struct slab_sheaf bootstrap_sheaf = {};
7423 	int cpu;
7424 
7425 	for_each_possible_cpu(cpu) {
7426 		struct slub_percpu_sheaves *pcs;
7427 
7428 		pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
7429 
7430 		local_trylock_init(&pcs->lock);
7431 
7432 		/*
7433 		 * Bootstrap sheaf has zero size so fast-path allocation fails.
7434 		 * It has also size == s->sheaf_capacity, so fast-path free
7435 		 * fails. In the slow paths we recognize the situation by
7436 		 * checking s->sheaf_capacity. This allows fast paths to assume
7437 		 * s->cpu_sheaves and pcs->main always exists and are valid.
7438 		 * It's also safe to share the single static bootstrap_sheaf
7439 		 * with zero-sized objects array as it's never modified.
7440 		 *
7441 		 * Bootstrap_sheaf also has NULL pointer to kmem_cache so we
7442 		 * recognize it and not attempt to free it when destroying the
7443 		 * cache.
7444 		 *
7445 		 * We keep bootstrap_sheaf for kmem_cache and kmem_cache_node,
7446 		 * caches with debug enabled, and all caches with SLUB_TINY.
7447 		 * For kmalloc caches it's used temporarily during the initial
7448 		 * bootstrap.
7449 		 */
7450 		if (!s->sheaf_capacity)
7451 			pcs->main = &bootstrap_sheaf;
7452 		else
7453 			pcs->main = alloc_empty_sheaf(s, GFP_KERNEL);
7454 
7455 		if (!pcs->main)
7456 			return -ENOMEM;
7457 	}
7458 
7459 	return 0;
7460 }
7461 
7462 static struct kmem_cache *kmem_cache_node;
7463 
7464 /*
7465  * No kmalloc_node yet so do it by hand. We know that this is the first
7466  * slab on the node for this slabcache. There are no concurrent accesses
7467  * possible.
7468  *
7469  * Note that this function only works on the kmem_cache_node
7470  * when allocating for the kmem_cache_node. This is used for bootstrapping
7471  * memory on a fresh node that has no slab structures yet.
7472  */
7473 static void early_kmem_cache_node_alloc(int node)
7474 {
7475 	struct slab *slab;
7476 	struct kmem_cache_node *n;
7477 
7478 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
7479 
7480 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
7481 
7482 	BUG_ON(!slab);
7483 	if (slab_nid(slab) != node) {
7484 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
7485 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
7486 	}
7487 
7488 	n = slab->freelist;
7489 	BUG_ON(!n);
7490 #ifdef CONFIG_SLUB_DEBUG
7491 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
7492 #endif
7493 	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
7494 	slab->freelist = get_freepointer(kmem_cache_node, n);
7495 	slab->inuse = 1;
7496 	kmem_cache_node->node[node] = n;
7497 	init_kmem_cache_node(n, NULL);
7498 	inc_slabs_node(kmem_cache_node, node, slab->objects);
7499 
7500 	/*
7501 	 * No locks need to be taken here as it has just been
7502 	 * initialized and there is no concurrent access.
7503 	 */
7504 	__add_partial(n, slab, ADD_TO_HEAD);
7505 }
7506 
7507 static void free_kmem_cache_nodes(struct kmem_cache *s)
7508 {
7509 	int node;
7510 	struct kmem_cache_node *n;
7511 
7512 	for_each_kmem_cache_node(s, node, n) {
7513 		if (n->barn) {
7514 			WARN_ON(n->barn->nr_full);
7515 			WARN_ON(n->barn->nr_empty);
7516 			kfree(n->barn);
7517 			n->barn = NULL;
7518 		}
7519 
7520 		s->node[node] = NULL;
7521 		kmem_cache_free(kmem_cache_node, n);
7522 	}
7523 }
7524 
7525 void __kmem_cache_release(struct kmem_cache *s)
7526 {
7527 	cache_random_seq_destroy(s);
7528 	pcs_destroy(s);
7529 #ifdef CONFIG_SLUB_STATS
7530 	free_percpu(s->cpu_stats);
7531 #endif
7532 	free_kmem_cache_nodes(s);
7533 }
7534 
7535 static int init_kmem_cache_nodes(struct kmem_cache *s)
7536 {
7537 	int node;
7538 
7539 	for_each_node_mask(node, slab_nodes) {
7540 		struct kmem_cache_node *n;
7541 		struct node_barn *barn = NULL;
7542 
7543 		if (slab_state == DOWN) {
7544 			early_kmem_cache_node_alloc(node);
7545 			continue;
7546 		}
7547 
7548 		if (cache_has_sheaves(s)) {
7549 			barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
7550 
7551 			if (!barn)
7552 				return 0;
7553 		}
7554 
7555 		n = kmem_cache_alloc_node(kmem_cache_node,
7556 						GFP_KERNEL, node);
7557 		if (!n) {
7558 			kfree(barn);
7559 			return 0;
7560 		}
7561 
7562 		init_kmem_cache_node(n, barn);
7563 
7564 		s->node[node] = n;
7565 	}
7566 	return 1;
7567 }
7568 
7569 static unsigned int calculate_sheaf_capacity(struct kmem_cache *s,
7570 					     struct kmem_cache_args *args)
7571 
7572 {
7573 	unsigned int capacity;
7574 	size_t size;
7575 
7576 
7577 	if (IS_ENABLED(CONFIG_SLUB_TINY) || s->flags & SLAB_DEBUG_FLAGS)
7578 		return 0;
7579 
7580 	/*
7581 	 * Bootstrap caches can't have sheaves for now (SLAB_NO_OBJ_EXT).
7582 	 * SLAB_NOLEAKTRACE caches (e.g., kmemleak's object_cache) must not
7583 	 * have sheaves to avoid recursion when sheaf allocation triggers
7584 	 * kmemleak tracking.
7585 	 */
7586 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
7587 		return 0;
7588 
7589 	/*
7590 	 * For now we use roughly similar formula (divided by two as there are
7591 	 * two percpu sheaves) as what was used for percpu partial slabs, which
7592 	 * should result in similar lock contention (barn or list_lock)
7593 	 */
7594 	if (s->size >= PAGE_SIZE)
7595 		capacity = 4;
7596 	else if (s->size >= 1024)
7597 		capacity = 12;
7598 	else if (s->size >= 256)
7599 		capacity = 26;
7600 	else
7601 		capacity = 60;
7602 
7603 	/* Increment capacity to make sheaf exactly a kmalloc size bucket */
7604 	size = struct_size_t(struct slab_sheaf, objects, capacity);
7605 	size = kmalloc_size_roundup(size);
7606 	capacity = (size - struct_size_t(struct slab_sheaf, objects, 0)) / sizeof(void *);
7607 
7608 	/*
7609 	 * Respect an explicit request for capacity that's typically motivated by
7610 	 * expected maximum size of kmem_cache_prefill_sheaf() to not end up
7611 	 * using low-performance oversize sheaves
7612 	 */
7613 	return max(capacity, args->sheaf_capacity);
7614 }
7615 
7616 /*
7617  * calculate_sizes() determines the order and the distribution of data within
7618  * a slab object.
7619  */
7620 static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
7621 {
7622 	slab_flags_t flags = s->flags;
7623 	unsigned int size = s->object_size;
7624 	unsigned int aligned_size;
7625 	unsigned int order;
7626 
7627 	/*
7628 	 * Round up object size to the next word boundary. We can only
7629 	 * place the free pointer at word boundaries and this determines
7630 	 * the possible location of the free pointer.
7631 	 */
7632 	size = ALIGN(size, sizeof(void *));
7633 
7634 #ifdef CONFIG_SLUB_DEBUG
7635 	/*
7636 	 * Determine if we can poison the object itself. If the user of
7637 	 * the slab may touch the object after free or before allocation
7638 	 * then we should never poison the object itself.
7639 	 */
7640 	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
7641 			!s->ctor)
7642 		s->flags |= __OBJECT_POISON;
7643 	else
7644 		s->flags &= ~__OBJECT_POISON;
7645 
7646 
7647 	/*
7648 	 * If we are Redzoning and there is no space between the end of the
7649 	 * object and the following fields, add one word so the right Redzone
7650 	 * is non-empty.
7651 	 */
7652 	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
7653 		size += sizeof(void *);
7654 #endif
7655 
7656 	/*
7657 	 * With that we have determined the number of bytes in actual use
7658 	 * by the object and redzoning.
7659 	 */
7660 	s->inuse = size;
7661 
7662 	if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
7663 	    (flags & SLAB_POISON) ||
7664 	    (s->ctor && !args->use_freeptr_offset) ||
7665 	    ((flags & SLAB_RED_ZONE) &&
7666 	     (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
7667 		/*
7668 		 * Relocate free pointer after the object if it is not
7669 		 * permitted to overwrite the first word of the object on
7670 		 * kmem_cache_free.
7671 		 *
7672 		 * This is the case if we do RCU, have a constructor, are
7673 		 * poisoning the objects, or are redzoning an object smaller
7674 		 * than sizeof(void *) or are redzoning an object with
7675 		 * slub_debug_orig_size() enabled, in which case the right
7676 		 * redzone may be extended.
7677 		 *
7678 		 * The assumption that s->offset >= s->inuse means free
7679 		 * pointer is outside of the object is used in the
7680 		 * freeptr_outside_object() function. If that is no
7681 		 * longer true, the function needs to be modified.
7682 		 */
7683 		s->offset = size;
7684 		size += sizeof(void *);
7685 	} else if (((flags & SLAB_TYPESAFE_BY_RCU) || s->ctor) &&
7686 			args->use_freeptr_offset) {
7687 		s->offset = args->freeptr_offset;
7688 	} else {
7689 		/*
7690 		 * Store freelist pointer near middle of object to keep
7691 		 * it away from the edges of the object to avoid small
7692 		 * sized over/underflows from neighboring allocations.
7693 		 */
7694 		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
7695 	}
7696 
7697 #ifdef CONFIG_SLUB_DEBUG
7698 	if (flags & SLAB_STORE_USER) {
7699 		/*
7700 		 * Need to store information about allocs and frees after
7701 		 * the object.
7702 		 */
7703 		size += 2 * sizeof(struct track);
7704 
7705 		/* Save the original kmalloc request size */
7706 		if (flags & SLAB_KMALLOC)
7707 			size += sizeof(unsigned long);
7708 	}
7709 #endif
7710 
7711 	kasan_cache_create(s, &size, &s->flags);
7712 #ifdef CONFIG_SLUB_DEBUG
7713 	if (flags & SLAB_RED_ZONE) {
7714 		/*
7715 		 * Add some empty padding so that we can catch
7716 		 * overwrites from earlier objects rather than let
7717 		 * tracking information or the free pointer be
7718 		 * corrupted if a user writes before the start
7719 		 * of the object.
7720 		 */
7721 		size += sizeof(void *);
7722 
7723 		s->red_left_pad = sizeof(void *);
7724 		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
7725 		size += s->red_left_pad;
7726 	}
7727 #endif
7728 
7729 	/*
7730 	 * SLUB stores one object immediately after another beginning from
7731 	 * offset 0. In order to align the objects we have to simply size
7732 	 * each object to conform to the alignment.
7733 	 */
7734 	aligned_size = ALIGN(size, s->align);
7735 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
7736 	if (slab_args_unmergeable(args, s->flags) &&
7737 			(aligned_size - size >= sizeof(struct slabobj_ext)))
7738 		s->flags |= SLAB_OBJ_EXT_IN_OBJ;
7739 #endif
7740 	size = aligned_size;
7741 
7742 	s->size = size;
7743 	s->reciprocal_size = reciprocal_value(size);
7744 	order = calculate_order(size);
7745 
7746 	if ((int)order < 0)
7747 		return 0;
7748 
7749 	s->allocflags = __GFP_COMP;
7750 
7751 	if (s->flags & SLAB_CACHE_DMA)
7752 		s->allocflags |= GFP_DMA;
7753 
7754 	if (s->flags & SLAB_CACHE_DMA32)
7755 		s->allocflags |= GFP_DMA32;
7756 
7757 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
7758 		s->allocflags |= __GFP_RECLAIMABLE;
7759 
7760 	/*
7761 	 * For KMALLOC_NORMAL caches we enable sheaves later by
7762 	 * bootstrap_kmalloc_sheaves() to avoid recursion
7763 	 */
7764 	if (!is_kmalloc_normal(s))
7765 		s->sheaf_capacity = calculate_sheaf_capacity(s, args);
7766 
7767 	/*
7768 	 * Determine the number of objects per slab
7769 	 */
7770 	s->oo = oo_make(order, size);
7771 	s->min = oo_make(get_order(size), size);
7772 
7773 	return !!oo_objects(s->oo);
7774 }
7775 
7776 static void list_slab_objects(struct kmem_cache *s, struct slab *slab)
7777 {
7778 #ifdef CONFIG_SLUB_DEBUG
7779 	void *addr = slab_address(slab);
7780 	void *p;
7781 
7782 	if (!slab_add_kunit_errors())
7783 		slab_bug(s, "Objects remaining on __kmem_cache_shutdown()");
7784 
7785 	spin_lock(&object_map_lock);
7786 	__fill_map(object_map, s, slab);
7787 
7788 	for_each_object(p, s, addr, slab->objects) {
7789 
7790 		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
7791 			if (slab_add_kunit_errors())
7792 				continue;
7793 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
7794 			print_tracking(s, p);
7795 		}
7796 	}
7797 	spin_unlock(&object_map_lock);
7798 
7799 	__slab_err(slab);
7800 #endif
7801 }
7802 
7803 /*
7804  * Attempt to free all partial slabs on a node.
7805  * This is called from __kmem_cache_shutdown(). We must take list_lock
7806  * because sysfs file might still access partial list after the shutdowning.
7807  */
7808 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
7809 {
7810 	LIST_HEAD(discard);
7811 	struct slab *slab, *h;
7812 
7813 	BUG_ON(irqs_disabled());
7814 	spin_lock_irq(&n->list_lock);
7815 	list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
7816 		if (!slab->inuse) {
7817 			remove_partial(n, slab);
7818 			list_add(&slab->slab_list, &discard);
7819 		} else {
7820 			list_slab_objects(s, slab);
7821 		}
7822 	}
7823 	spin_unlock_irq(&n->list_lock);
7824 
7825 	list_for_each_entry_safe(slab, h, &discard, slab_list)
7826 		discard_slab(s, slab);
7827 }
7828 
7829 bool __kmem_cache_empty(struct kmem_cache *s)
7830 {
7831 	int node;
7832 	struct kmem_cache_node *n;
7833 
7834 	for_each_kmem_cache_node(s, node, n)
7835 		if (n->nr_partial || node_nr_slabs(n))
7836 			return false;
7837 	return true;
7838 }
7839 
7840 /*
7841  * Release all resources used by a slab cache.
7842  */
7843 int __kmem_cache_shutdown(struct kmem_cache *s)
7844 {
7845 	int node;
7846 	struct kmem_cache_node *n;
7847 
7848 	flush_all_cpus_locked(s);
7849 
7850 	/* we might have rcu sheaves in flight */
7851 	if (cache_has_sheaves(s))
7852 		rcu_barrier();
7853 
7854 	/* Attempt to free all objects */
7855 	for_each_kmem_cache_node(s, node, n) {
7856 		if (n->barn)
7857 			barn_shrink(s, n->barn);
7858 		free_partial(s, n);
7859 		if (n->nr_partial || node_nr_slabs(n))
7860 			return 1;
7861 	}
7862 	return 0;
7863 }
7864 
7865 #ifdef CONFIG_PRINTK
7866 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
7867 {
7868 	void *base;
7869 	int __maybe_unused i;
7870 	unsigned int objnr;
7871 	void *objp;
7872 	void *objp0;
7873 	struct kmem_cache *s = slab->slab_cache;
7874 	struct track __maybe_unused *trackp;
7875 
7876 	kpp->kp_ptr = object;
7877 	kpp->kp_slab = slab;
7878 	kpp->kp_slab_cache = s;
7879 	base = slab_address(slab);
7880 	objp0 = kasan_reset_tag(object);
7881 #ifdef CONFIG_SLUB_DEBUG
7882 	objp = restore_red_left(s, objp0);
7883 #else
7884 	objp = objp0;
7885 #endif
7886 	objnr = obj_to_index(s, slab, objp);
7887 	kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
7888 	objp = base + s->size * objnr;
7889 	kpp->kp_objp = objp;
7890 	if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
7891 			 || (objp - base) % s->size) ||
7892 	    !(s->flags & SLAB_STORE_USER))
7893 		return;
7894 #ifdef CONFIG_SLUB_DEBUG
7895 	objp = fixup_red_left(s, objp);
7896 	trackp = get_track(s, objp, TRACK_ALLOC);
7897 	kpp->kp_ret = (void *)trackp->addr;
7898 #ifdef CONFIG_STACKDEPOT
7899 	{
7900 		depot_stack_handle_t handle;
7901 		unsigned long *entries;
7902 		unsigned int nr_entries;
7903 
7904 		handle = READ_ONCE(trackp->handle);
7905 		if (handle) {
7906 			nr_entries = stack_depot_fetch(handle, &entries);
7907 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
7908 				kpp->kp_stack[i] = (void *)entries[i];
7909 		}
7910 
7911 		trackp = get_track(s, objp, TRACK_FREE);
7912 		handle = READ_ONCE(trackp->handle);
7913 		if (handle) {
7914 			nr_entries = stack_depot_fetch(handle, &entries);
7915 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
7916 				kpp->kp_free_stack[i] = (void *)entries[i];
7917 		}
7918 	}
7919 #endif
7920 #endif
7921 }
7922 #endif
7923 
7924 /********************************************************************
7925  *		Kmalloc subsystem
7926  *******************************************************************/
7927 
7928 static int __init setup_slub_min_order(const char *str, const struct kernel_param *kp)
7929 {
7930 	int ret;
7931 
7932 	ret = kstrtouint(str, 0, &slub_min_order);
7933 	if (ret)
7934 		return ret;
7935 
7936 	if (slub_min_order > slub_max_order)
7937 		slub_max_order = slub_min_order;
7938 
7939 	return 0;
7940 }
7941 
7942 static const struct kernel_param_ops param_ops_slab_min_order __initconst = {
7943 	.set = setup_slub_min_order,
7944 };
7945 __core_param_cb(slab_min_order, &param_ops_slab_min_order, &slub_min_order, 0);
7946 __core_param_cb(slub_min_order, &param_ops_slab_min_order, &slub_min_order, 0);
7947 
7948 static int __init setup_slub_max_order(const char *str, const struct kernel_param *kp)
7949 {
7950 	int ret;
7951 
7952 	ret = kstrtouint(str, 0, &slub_max_order);
7953 	if (ret)
7954 		return ret;
7955 
7956 	slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
7957 
7958 	if (slub_min_order > slub_max_order)
7959 		slub_min_order = slub_max_order;
7960 
7961 	return 0;
7962 }
7963 
7964 static const struct kernel_param_ops param_ops_slab_max_order __initconst = {
7965 	.set = setup_slub_max_order,
7966 };
7967 __core_param_cb(slab_max_order, &param_ops_slab_max_order, &slub_max_order, 0);
7968 __core_param_cb(slub_max_order, &param_ops_slab_max_order, &slub_max_order, 0);
7969 
7970 core_param(slab_min_objects, slub_min_objects, uint, 0);
7971 core_param(slub_min_objects, slub_min_objects, uint, 0);
7972 
7973 #ifdef CONFIG_NUMA
7974 static int __init setup_slab_strict_numa(const char *str, const struct kernel_param *kp)
7975 {
7976 	if (nr_node_ids > 1) {
7977 		static_branch_enable(&strict_numa);
7978 		pr_info("SLUB: Strict NUMA enabled.\n");
7979 	} else {
7980 		pr_warn("slab_strict_numa parameter set on non NUMA system.\n");
7981 	}
7982 
7983 	return 0;
7984 }
7985 
7986 static const struct kernel_param_ops param_ops_slab_strict_numa __initconst = {
7987 	.flags = KERNEL_PARAM_OPS_FL_NOARG,
7988 	.set = setup_slab_strict_numa,
7989 };
7990 __core_param_cb(slab_strict_numa, &param_ops_slab_strict_numa, NULL, 0);
7991 #endif
7992 
7993 
7994 #ifdef CONFIG_HARDENED_USERCOPY
7995 /*
7996  * Rejects incorrectly sized objects and objects that are to be copied
7997  * to/from userspace but do not fall entirely within the containing slab
7998  * cache's usercopy region.
7999  *
8000  * Returns NULL if check passes, otherwise const char * to name of cache
8001  * to indicate an error.
8002  */
8003 void __check_heap_object(const void *ptr, unsigned long n,
8004 			 const struct slab *slab, bool to_user)
8005 {
8006 	struct kmem_cache *s;
8007 	unsigned int offset;
8008 	bool is_kfence = is_kfence_address(ptr);
8009 
8010 	ptr = kasan_reset_tag(ptr);
8011 
8012 	/* Find object and usable object size. */
8013 	s = slab->slab_cache;
8014 
8015 	/* Reject impossible pointers. */
8016 	if (ptr < slab_address(slab))
8017 		usercopy_abort("SLUB object not in SLUB page?!", NULL,
8018 			       to_user, 0, n);
8019 
8020 	/* Find offset within object. */
8021 	if (is_kfence)
8022 		offset = ptr - kfence_object_start(ptr);
8023 	else
8024 		offset = (ptr - slab_address(slab)) % s->size;
8025 
8026 	/* Adjust for redzone and reject if within the redzone. */
8027 	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
8028 		if (offset < s->red_left_pad)
8029 			usercopy_abort("SLUB object in left red zone",
8030 				       s->name, to_user, offset, n);
8031 		offset -= s->red_left_pad;
8032 	}
8033 
8034 	/* Allow address range falling entirely within usercopy region. */
8035 	if (offset >= s->useroffset &&
8036 	    offset - s->useroffset <= s->usersize &&
8037 	    n <= s->useroffset - offset + s->usersize)
8038 		return;
8039 
8040 	usercopy_abort("SLUB object", s->name, to_user, offset, n);
8041 }
8042 #endif /* CONFIG_HARDENED_USERCOPY */
8043 
8044 #define SHRINK_PROMOTE_MAX 32
8045 
8046 /*
8047  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
8048  * up most to the head of the partial lists. New allocations will then
8049  * fill those up and thus they can be removed from the partial lists.
8050  *
8051  * The slabs with the least items are placed last. This results in them
8052  * being allocated from last increasing the chance that the last objects
8053  * are freed in them.
8054  */
8055 static int __kmem_cache_do_shrink(struct kmem_cache *s)
8056 {
8057 	int node;
8058 	int i;
8059 	struct kmem_cache_node *n;
8060 	struct slab *slab;
8061 	struct slab *t;
8062 	struct list_head discard;
8063 	struct list_head promote[SHRINK_PROMOTE_MAX];
8064 	unsigned long flags;
8065 	int ret = 0;
8066 
8067 	for_each_kmem_cache_node(s, node, n) {
8068 		INIT_LIST_HEAD(&discard);
8069 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
8070 			INIT_LIST_HEAD(promote + i);
8071 
8072 		if (n->barn)
8073 			barn_shrink(s, n->barn);
8074 
8075 		spin_lock_irqsave(&n->list_lock, flags);
8076 
8077 		/*
8078 		 * Build lists of slabs to discard or promote.
8079 		 *
8080 		 * Note that concurrent frees may occur while we hold the
8081 		 * list_lock. slab->inuse here is the upper limit.
8082 		 */
8083 		list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
8084 			int free = slab->objects - slab->inuse;
8085 
8086 			/* Do not reread slab->inuse */
8087 			barrier();
8088 
8089 			/* We do not keep full slabs on the list */
8090 			BUG_ON(free <= 0);
8091 
8092 			if (free == slab->objects) {
8093 				list_move(&slab->slab_list, &discard);
8094 				slab_clear_node_partial(slab);
8095 				n->nr_partial--;
8096 				dec_slabs_node(s, node, slab->objects);
8097 			} else if (free <= SHRINK_PROMOTE_MAX)
8098 				list_move(&slab->slab_list, promote + free - 1);
8099 		}
8100 
8101 		/*
8102 		 * Promote the slabs filled up most to the head of the
8103 		 * partial list.
8104 		 */
8105 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
8106 			list_splice(promote + i, &n->partial);
8107 
8108 		spin_unlock_irqrestore(&n->list_lock, flags);
8109 
8110 		/* Release empty slabs */
8111 		list_for_each_entry_safe(slab, t, &discard, slab_list)
8112 			free_slab(s, slab);
8113 
8114 		if (node_nr_slabs(n))
8115 			ret = 1;
8116 	}
8117 
8118 	return ret;
8119 }
8120 
8121 int __kmem_cache_shrink(struct kmem_cache *s)
8122 {
8123 	flush_all(s);
8124 	return __kmem_cache_do_shrink(s);
8125 }
8126 
8127 static int slab_mem_going_offline_callback(void)
8128 {
8129 	struct kmem_cache *s;
8130 
8131 	mutex_lock(&slab_mutex);
8132 	list_for_each_entry(s, &slab_caches, list) {
8133 		flush_all_cpus_locked(s);
8134 		__kmem_cache_do_shrink(s);
8135 	}
8136 	mutex_unlock(&slab_mutex);
8137 
8138 	return 0;
8139 }
8140 
8141 static int slab_mem_going_online_callback(int nid)
8142 {
8143 	struct kmem_cache_node *n;
8144 	struct kmem_cache *s;
8145 	int ret = 0;
8146 
8147 	/*
8148 	 * We are bringing a node online. No memory is available yet. We must
8149 	 * allocate a kmem_cache_node structure in order to bring the node
8150 	 * online.
8151 	 */
8152 	mutex_lock(&slab_mutex);
8153 	list_for_each_entry(s, &slab_caches, list) {
8154 		struct node_barn *barn = NULL;
8155 
8156 		/*
8157 		 * The structure may already exist if the node was previously
8158 		 * onlined and offlined.
8159 		 */
8160 		if (get_node(s, nid))
8161 			continue;
8162 
8163 		if (cache_has_sheaves(s)) {
8164 			barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, nid);
8165 
8166 			if (!barn) {
8167 				ret = -ENOMEM;
8168 				goto out;
8169 			}
8170 		}
8171 
8172 		/*
8173 		 * XXX: kmem_cache_alloc_node will fallback to other nodes
8174 		 *      since memory is not yet available from the node that
8175 		 *      is brought up.
8176 		 */
8177 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
8178 		if (!n) {
8179 			kfree(barn);
8180 			ret = -ENOMEM;
8181 			goto out;
8182 		}
8183 
8184 		init_kmem_cache_node(n, barn);
8185 
8186 		s->node[nid] = n;
8187 	}
8188 	/*
8189 	 * Any cache created after this point will also have kmem_cache_node
8190 	 * initialized for the new node.
8191 	 */
8192 	node_set(nid, slab_nodes);
8193 out:
8194 	mutex_unlock(&slab_mutex);
8195 	return ret;
8196 }
8197 
8198 static int slab_memory_callback(struct notifier_block *self,
8199 				unsigned long action, void *arg)
8200 {
8201 	struct node_notify *nn = arg;
8202 	int nid = nn->nid;
8203 	int ret = 0;
8204 
8205 	switch (action) {
8206 	case NODE_ADDING_FIRST_MEMORY:
8207 		ret = slab_mem_going_online_callback(nid);
8208 		break;
8209 	case NODE_REMOVING_LAST_MEMORY:
8210 		ret = slab_mem_going_offline_callback();
8211 		break;
8212 	}
8213 	if (ret)
8214 		ret = notifier_from_errno(ret);
8215 	else
8216 		ret = NOTIFY_OK;
8217 	return ret;
8218 }
8219 
8220 /********************************************************************
8221  *			Basic setup of slabs
8222  *******************************************************************/
8223 
8224 /*
8225  * Used for early kmem_cache structures that were allocated using
8226  * the page allocator. Allocate them properly then fix up the pointers
8227  * that may be pointing to the wrong kmem_cache structure.
8228  */
8229 
8230 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
8231 {
8232 	int node;
8233 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
8234 	struct kmem_cache_node *n;
8235 
8236 	memcpy(s, static_cache, kmem_cache->object_size);
8237 
8238 	for_each_kmem_cache_node(s, node, n) {
8239 		struct slab *p;
8240 
8241 		list_for_each_entry(p, &n->partial, slab_list)
8242 			p->slab_cache = s;
8243 
8244 #ifdef CONFIG_SLUB_DEBUG
8245 		list_for_each_entry(p, &n->full, slab_list)
8246 			p->slab_cache = s;
8247 #endif
8248 	}
8249 	list_add(&s->list, &slab_caches);
8250 	return s;
8251 }
8252 
8253 /*
8254  * Finish the sheaves initialization done normally by init_percpu_sheaves() and
8255  * init_kmem_cache_nodes(). For normal kmalloc caches we have to bootstrap it
8256  * since sheaves and barns are allocated by kmalloc.
8257  */
8258 static void __init bootstrap_cache_sheaves(struct kmem_cache *s)
8259 {
8260 	struct kmem_cache_args empty_args = {};
8261 	unsigned int capacity;
8262 	bool failed = false;
8263 	int node, cpu;
8264 
8265 	capacity = calculate_sheaf_capacity(s, &empty_args);
8266 
8267 	/* capacity can be 0 due to debugging or SLUB_TINY */
8268 	if (!capacity)
8269 		return;
8270 
8271 	for_each_node_mask(node, slab_nodes) {
8272 		struct node_barn *barn;
8273 
8274 		barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
8275 
8276 		if (!barn) {
8277 			failed = true;
8278 			goto out;
8279 		}
8280 
8281 		barn_init(barn);
8282 		get_node(s, node)->barn = barn;
8283 	}
8284 
8285 	for_each_possible_cpu(cpu) {
8286 		struct slub_percpu_sheaves *pcs;
8287 
8288 		pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
8289 
8290 		pcs->main = __alloc_empty_sheaf(s, GFP_KERNEL, capacity);
8291 
8292 		if (!pcs->main) {
8293 			failed = true;
8294 			break;
8295 		}
8296 	}
8297 
8298 out:
8299 	/*
8300 	 * It's still early in boot so treat this like same as a failure to
8301 	 * create the kmalloc cache in the first place
8302 	 */
8303 	if (failed)
8304 		panic("Out of memory when creating kmem_cache %s\n", s->name);
8305 
8306 	s->sheaf_capacity = capacity;
8307 }
8308 
8309 static void __init bootstrap_kmalloc_sheaves(void)
8310 {
8311 	enum kmalloc_cache_type type;
8312 
8313 	for (type = KMALLOC_NORMAL; type <= KMALLOC_RANDOM_END; type++) {
8314 		for (int idx = 0; idx < KMALLOC_SHIFT_HIGH + 1; idx++) {
8315 			if (kmalloc_caches[type][idx])
8316 				bootstrap_cache_sheaves(kmalloc_caches[type][idx]);
8317 		}
8318 	}
8319 }
8320 
8321 void __init kmem_cache_init(void)
8322 {
8323 	static __initdata struct kmem_cache boot_kmem_cache,
8324 		boot_kmem_cache_node;
8325 	int node;
8326 
8327 	if (debug_guardpage_minorder())
8328 		slub_max_order = 0;
8329 
8330 	/* Inform pointer hashing choice about slub debugging state. */
8331 	hash_pointers_finalize(__slub_debug_enabled());
8332 
8333 	kmem_cache_node = &boot_kmem_cache_node;
8334 	kmem_cache = &boot_kmem_cache;
8335 
8336 	/*
8337 	 * Initialize the nodemask for which we will allocate per node
8338 	 * structures. Here we don't need taking slab_mutex yet.
8339 	 */
8340 	for_each_node_state(node, N_MEMORY)
8341 		node_set(node, slab_nodes);
8342 
8343 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
8344 			sizeof(struct kmem_cache_node),
8345 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
8346 
8347 	hotplug_node_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
8348 
8349 	/* Able to allocate the per node structures */
8350 	slab_state = PARTIAL;
8351 
8352 	create_boot_cache(kmem_cache, "kmem_cache",
8353 			offsetof(struct kmem_cache, node) +
8354 				nr_node_ids * sizeof(struct kmem_cache_node *),
8355 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
8356 
8357 	kmem_cache = bootstrap(&boot_kmem_cache);
8358 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
8359 
8360 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
8361 	setup_kmalloc_cache_index_table();
8362 	create_kmalloc_caches();
8363 
8364 	bootstrap_kmalloc_sheaves();
8365 
8366 	/* Setup random freelists for each cache */
8367 	init_freelist_randomization();
8368 
8369 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
8370 				  slub_cpu_dead);
8371 
8372 	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
8373 		cache_line_size(),
8374 		slub_min_order, slub_max_order, slub_min_objects,
8375 		nr_cpu_ids, nr_node_ids);
8376 }
8377 
8378 void __init kmem_cache_init_late(void)
8379 {
8380 	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM | WQ_PERCPU,
8381 				  0);
8382 	WARN_ON(!flushwq);
8383 #ifdef CONFIG_SLAB_FREELIST_RANDOM
8384 	prandom_init_once(&slab_rnd_state);
8385 #endif
8386 }
8387 
8388 int do_kmem_cache_create(struct kmem_cache *s, const char *name,
8389 			 unsigned int size, struct kmem_cache_args *args,
8390 			 slab_flags_t flags)
8391 {
8392 	int err = -EINVAL;
8393 
8394 	s->name = name;
8395 	s->size = s->object_size = size;
8396 
8397 	s->flags = kmem_cache_flags(flags, s->name);
8398 #ifdef CONFIG_SLAB_FREELIST_HARDENED
8399 	s->random = get_random_long();
8400 #endif
8401 	s->align = args->align;
8402 	s->ctor = args->ctor;
8403 #ifdef CONFIG_HARDENED_USERCOPY
8404 	s->useroffset = args->useroffset;
8405 	s->usersize = args->usersize;
8406 #endif
8407 
8408 	if (!calculate_sizes(args, s))
8409 		goto out;
8410 	if (disable_higher_order_debug) {
8411 		/*
8412 		 * Disable debugging flags that store metadata if the min slab
8413 		 * order increased.
8414 		 */
8415 		if (get_order(s->size) > get_order(s->object_size)) {
8416 			s->flags &= ~DEBUG_METADATA_FLAGS;
8417 			s->offset = 0;
8418 			if (!calculate_sizes(args, s))
8419 				goto out;
8420 		}
8421 	}
8422 
8423 #ifdef system_has_freelist_aba
8424 	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
8425 		/* Enable fast mode */
8426 		s->flags |= __CMPXCHG_DOUBLE;
8427 	}
8428 #endif
8429 
8430 	/*
8431 	 * The larger the object size is, the more slabs we want on the partial
8432 	 * list to avoid pounding the page allocator excessively.
8433 	 */
8434 	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
8435 	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
8436 
8437 	s->cpu_sheaves = alloc_percpu(struct slub_percpu_sheaves);
8438 	if (!s->cpu_sheaves) {
8439 		err = -ENOMEM;
8440 		goto out;
8441 	}
8442 
8443 #ifdef CONFIG_NUMA
8444 	s->remote_node_defrag_ratio = 1000;
8445 #endif
8446 
8447 	/* Initialize the pre-computed randomized freelist if slab is up */
8448 	if (slab_state >= UP) {
8449 		if (init_cache_random_seq(s))
8450 			goto out;
8451 	}
8452 
8453 	if (!init_kmem_cache_nodes(s))
8454 		goto out;
8455 
8456 #ifdef CONFIG_SLUB_STATS
8457 	if (!alloc_kmem_cache_stats(s))
8458 		goto out;
8459 #endif
8460 
8461 	err = init_percpu_sheaves(s);
8462 	if (err)
8463 		goto out;
8464 
8465 	err = 0;
8466 
8467 	/* Mutex is not taken during early boot */
8468 	if (slab_state <= UP)
8469 		goto out;
8470 
8471 	/*
8472 	 * Failing to create sysfs files is not critical to SLUB functionality.
8473 	 * If it fails, proceed with cache creation without these files.
8474 	 */
8475 	if (sysfs_slab_add(s))
8476 		pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name);
8477 
8478 	if (s->flags & SLAB_STORE_USER)
8479 		debugfs_slab_add(s);
8480 
8481 out:
8482 	if (err)
8483 		__kmem_cache_release(s);
8484 	return err;
8485 }
8486 
8487 #ifdef SLAB_SUPPORTS_SYSFS
8488 static int count_inuse(struct slab *slab)
8489 {
8490 	return slab->inuse;
8491 }
8492 
8493 static int count_total(struct slab *slab)
8494 {
8495 	return slab->objects;
8496 }
8497 #endif
8498 
8499 #ifdef CONFIG_SLUB_DEBUG
8500 static void validate_slab(struct kmem_cache *s, struct slab *slab,
8501 			  unsigned long *obj_map)
8502 {
8503 	void *p;
8504 	void *addr = slab_address(slab);
8505 
8506 	if (!validate_slab_ptr(slab)) {
8507 		slab_err(s, slab, "Not a valid slab page");
8508 		return;
8509 	}
8510 
8511 	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
8512 		return;
8513 
8514 	/* Now we know that a valid freelist exists */
8515 	__fill_map(obj_map, s, slab);
8516 	for_each_object(p, s, addr, slab->objects) {
8517 		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
8518 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
8519 
8520 		if (!check_object(s, slab, p, val))
8521 			break;
8522 	}
8523 }
8524 
8525 static int validate_slab_node(struct kmem_cache *s,
8526 		struct kmem_cache_node *n, unsigned long *obj_map)
8527 {
8528 	unsigned long count = 0;
8529 	struct slab *slab;
8530 	unsigned long flags;
8531 
8532 	spin_lock_irqsave(&n->list_lock, flags);
8533 
8534 	list_for_each_entry(slab, &n->partial, slab_list) {
8535 		validate_slab(s, slab, obj_map);
8536 		count++;
8537 	}
8538 	if (count != n->nr_partial) {
8539 		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
8540 		       s->name, count, n->nr_partial);
8541 		slab_add_kunit_errors();
8542 	}
8543 
8544 	if (!(s->flags & SLAB_STORE_USER))
8545 		goto out;
8546 
8547 	list_for_each_entry(slab, &n->full, slab_list) {
8548 		validate_slab(s, slab, obj_map);
8549 		count++;
8550 	}
8551 	if (count != node_nr_slabs(n)) {
8552 		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
8553 		       s->name, count, node_nr_slabs(n));
8554 		slab_add_kunit_errors();
8555 	}
8556 
8557 out:
8558 	spin_unlock_irqrestore(&n->list_lock, flags);
8559 	return count;
8560 }
8561 
8562 long validate_slab_cache(struct kmem_cache *s)
8563 {
8564 	int node;
8565 	unsigned long count = 0;
8566 	struct kmem_cache_node *n;
8567 	unsigned long *obj_map;
8568 
8569 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
8570 	if (!obj_map)
8571 		return -ENOMEM;
8572 
8573 	flush_all(s);
8574 	for_each_kmem_cache_node(s, node, n)
8575 		count += validate_slab_node(s, n, obj_map);
8576 
8577 	bitmap_free(obj_map);
8578 
8579 	return count;
8580 }
8581 EXPORT_SYMBOL(validate_slab_cache);
8582 
8583 #ifdef CONFIG_DEBUG_FS
8584 /*
8585  * Generate lists of code addresses where slabcache objects are allocated
8586  * and freed.
8587  */
8588 
8589 struct location {
8590 	depot_stack_handle_t handle;
8591 	unsigned long count;
8592 	unsigned long addr;
8593 	unsigned long waste;
8594 	long long sum_time;
8595 	long min_time;
8596 	long max_time;
8597 	long min_pid;
8598 	long max_pid;
8599 	DECLARE_BITMAP(cpus, NR_CPUS);
8600 	nodemask_t nodes;
8601 };
8602 
8603 struct loc_track {
8604 	unsigned long max;
8605 	unsigned long count;
8606 	struct location *loc;
8607 	loff_t idx;
8608 };
8609 
8610 static struct dentry *slab_debugfs_root;
8611 
8612 static void free_loc_track(struct loc_track *t)
8613 {
8614 	if (t->max)
8615 		free_pages((unsigned long)t->loc,
8616 			get_order(sizeof(struct location) * t->max));
8617 }
8618 
8619 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
8620 {
8621 	struct location *l;
8622 	int order;
8623 
8624 	order = get_order(sizeof(struct location) * max);
8625 
8626 	l = (void *)__get_free_pages(flags, order);
8627 	if (!l)
8628 		return 0;
8629 
8630 	if (t->count) {
8631 		memcpy(l, t->loc, sizeof(struct location) * t->count);
8632 		free_loc_track(t);
8633 	}
8634 	t->max = max;
8635 	t->loc = l;
8636 	return 1;
8637 }
8638 
8639 static int add_location(struct loc_track *t, struct kmem_cache *s,
8640 				const struct track *track,
8641 				unsigned int orig_size)
8642 {
8643 	long start, end, pos;
8644 	struct location *l;
8645 	unsigned long caddr, chandle, cwaste;
8646 	unsigned long age = jiffies - track->when;
8647 	depot_stack_handle_t handle = 0;
8648 	unsigned int waste = s->object_size - orig_size;
8649 
8650 #ifdef CONFIG_STACKDEPOT
8651 	handle = READ_ONCE(track->handle);
8652 #endif
8653 	start = -1;
8654 	end = t->count;
8655 
8656 	for ( ; ; ) {
8657 		pos = start + (end - start + 1) / 2;
8658 
8659 		/*
8660 		 * There is nothing at "end". If we end up there
8661 		 * we need to add something to before end.
8662 		 */
8663 		if (pos == end)
8664 			break;
8665 
8666 		l = &t->loc[pos];
8667 		caddr = l->addr;
8668 		chandle = l->handle;
8669 		cwaste = l->waste;
8670 		if ((track->addr == caddr) && (handle == chandle) &&
8671 			(waste == cwaste)) {
8672 
8673 			l->count++;
8674 			if (track->when) {
8675 				l->sum_time += age;
8676 				if (age < l->min_time)
8677 					l->min_time = age;
8678 				if (age > l->max_time)
8679 					l->max_time = age;
8680 
8681 				if (track->pid < l->min_pid)
8682 					l->min_pid = track->pid;
8683 				if (track->pid > l->max_pid)
8684 					l->max_pid = track->pid;
8685 
8686 				cpumask_set_cpu(track->cpu,
8687 						to_cpumask(l->cpus));
8688 			}
8689 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
8690 			return 1;
8691 		}
8692 
8693 		if (track->addr < caddr)
8694 			end = pos;
8695 		else if (track->addr == caddr && handle < chandle)
8696 			end = pos;
8697 		else if (track->addr == caddr && handle == chandle &&
8698 				waste < cwaste)
8699 			end = pos;
8700 		else
8701 			start = pos;
8702 	}
8703 
8704 	/*
8705 	 * Not found. Insert new tracking element.
8706 	 */
8707 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
8708 		return 0;
8709 
8710 	l = t->loc + pos;
8711 	if (pos < t->count)
8712 		memmove(l + 1, l,
8713 			(t->count - pos) * sizeof(struct location));
8714 	t->count++;
8715 	l->count = 1;
8716 	l->addr = track->addr;
8717 	l->sum_time = age;
8718 	l->min_time = age;
8719 	l->max_time = age;
8720 	l->min_pid = track->pid;
8721 	l->max_pid = track->pid;
8722 	l->handle = handle;
8723 	l->waste = waste;
8724 	cpumask_clear(to_cpumask(l->cpus));
8725 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
8726 	nodes_clear(l->nodes);
8727 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
8728 	return 1;
8729 }
8730 
8731 static void process_slab(struct loc_track *t, struct kmem_cache *s,
8732 		struct slab *slab, enum track_item alloc,
8733 		unsigned long *obj_map)
8734 {
8735 	void *addr = slab_address(slab);
8736 	bool is_alloc = (alloc == TRACK_ALLOC);
8737 	void *p;
8738 
8739 	__fill_map(obj_map, s, slab);
8740 
8741 	for_each_object(p, s, addr, slab->objects)
8742 		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
8743 			add_location(t, s, get_track(s, p, alloc),
8744 				     is_alloc ? get_orig_size(s, p) :
8745 						s->object_size);
8746 }
8747 #endif  /* CONFIG_DEBUG_FS   */
8748 #endif	/* CONFIG_SLUB_DEBUG */
8749 
8750 #ifdef SLAB_SUPPORTS_SYSFS
8751 enum slab_stat_type {
8752 	SL_ALL,			/* All slabs */
8753 	SL_PARTIAL,		/* Only partially allocated slabs */
8754 	SL_CPU,			/* Only slabs used for cpu caches */
8755 	SL_OBJECTS,		/* Determine allocated objects not slabs */
8756 	SL_TOTAL		/* Determine object capacity not slabs */
8757 };
8758 
8759 #define SO_ALL		(1 << SL_ALL)
8760 #define SO_PARTIAL	(1 << SL_PARTIAL)
8761 #define SO_CPU		(1 << SL_CPU)
8762 #define SO_OBJECTS	(1 << SL_OBJECTS)
8763 #define SO_TOTAL	(1 << SL_TOTAL)
8764 
8765 static ssize_t show_slab_objects(struct kmem_cache *s,
8766 				 char *buf, unsigned long flags)
8767 {
8768 	unsigned long total = 0;
8769 	int node;
8770 	int x;
8771 	unsigned long *nodes;
8772 	int len = 0;
8773 
8774 	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
8775 	if (!nodes)
8776 		return -ENOMEM;
8777 
8778 	/*
8779 	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
8780 	 * already held which will conflict with an existing lock order:
8781 	 *
8782 	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
8783 	 *
8784 	 * We don't really need mem_hotplug_lock (to hold off
8785 	 * slab_mem_going_offline_callback) here because slab's memory hot
8786 	 * unplug code doesn't destroy the kmem_cache->node[] data.
8787 	 */
8788 
8789 #ifdef CONFIG_SLUB_DEBUG
8790 	if (flags & SO_ALL) {
8791 		struct kmem_cache_node *n;
8792 
8793 		for_each_kmem_cache_node(s, node, n) {
8794 
8795 			if (flags & SO_TOTAL)
8796 				x = node_nr_objs(n);
8797 			else if (flags & SO_OBJECTS)
8798 				x = node_nr_objs(n) - count_partial(n, count_free);
8799 			else
8800 				x = node_nr_slabs(n);
8801 			total += x;
8802 			nodes[node] += x;
8803 		}
8804 
8805 	} else
8806 #endif
8807 	if (flags & SO_PARTIAL) {
8808 		struct kmem_cache_node *n;
8809 
8810 		for_each_kmem_cache_node(s, node, n) {
8811 			if (flags & SO_TOTAL)
8812 				x = count_partial(n, count_total);
8813 			else if (flags & SO_OBJECTS)
8814 				x = count_partial(n, count_inuse);
8815 			else
8816 				x = n->nr_partial;
8817 			total += x;
8818 			nodes[node] += x;
8819 		}
8820 	}
8821 
8822 	len += sysfs_emit_at(buf, len, "%lu", total);
8823 #ifdef CONFIG_NUMA
8824 	for (node = 0; node < nr_node_ids; node++) {
8825 		if (nodes[node])
8826 			len += sysfs_emit_at(buf, len, " N%d=%lu",
8827 					     node, nodes[node]);
8828 	}
8829 #endif
8830 	len += sysfs_emit_at(buf, len, "\n");
8831 	kfree(nodes);
8832 
8833 	return len;
8834 }
8835 
8836 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
8837 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
8838 
8839 struct slab_attribute {
8840 	struct attribute attr;
8841 	ssize_t (*show)(struct kmem_cache *s, char *buf);
8842 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
8843 };
8844 
8845 #define SLAB_ATTR_RO(_name) \
8846 	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
8847 
8848 #define SLAB_ATTR(_name) \
8849 	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
8850 
8851 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
8852 {
8853 	return sysfs_emit(buf, "%u\n", s->size);
8854 }
8855 SLAB_ATTR_RO(slab_size);
8856 
8857 static ssize_t align_show(struct kmem_cache *s, char *buf)
8858 {
8859 	return sysfs_emit(buf, "%u\n", s->align);
8860 }
8861 SLAB_ATTR_RO(align);
8862 
8863 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
8864 {
8865 	return sysfs_emit(buf, "%u\n", s->object_size);
8866 }
8867 SLAB_ATTR_RO(object_size);
8868 
8869 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
8870 {
8871 	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
8872 }
8873 SLAB_ATTR_RO(objs_per_slab);
8874 
8875 static ssize_t order_show(struct kmem_cache *s, char *buf)
8876 {
8877 	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
8878 }
8879 SLAB_ATTR_RO(order);
8880 
8881 static ssize_t sheaf_capacity_show(struct kmem_cache *s, char *buf)
8882 {
8883 	return sysfs_emit(buf, "%u\n", s->sheaf_capacity);
8884 }
8885 SLAB_ATTR_RO(sheaf_capacity);
8886 
8887 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
8888 {
8889 	return sysfs_emit(buf, "%lu\n", s->min_partial);
8890 }
8891 
8892 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
8893 				 size_t length)
8894 {
8895 	unsigned long min;
8896 	int err;
8897 
8898 	err = kstrtoul(buf, 10, &min);
8899 	if (err)
8900 		return err;
8901 
8902 	s->min_partial = min;
8903 	return length;
8904 }
8905 SLAB_ATTR(min_partial);
8906 
8907 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
8908 {
8909 	return sysfs_emit(buf, "0\n");
8910 }
8911 
8912 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
8913 				 size_t length)
8914 {
8915 	unsigned int objects;
8916 	int err;
8917 
8918 	err = kstrtouint(buf, 10, &objects);
8919 	if (err)
8920 		return err;
8921 	if (objects)
8922 		return -EINVAL;
8923 
8924 	return length;
8925 }
8926 SLAB_ATTR(cpu_partial);
8927 
8928 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
8929 {
8930 	if (!s->ctor)
8931 		return 0;
8932 	return sysfs_emit(buf, "%pS\n", s->ctor);
8933 }
8934 SLAB_ATTR_RO(ctor);
8935 
8936 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
8937 {
8938 	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
8939 }
8940 SLAB_ATTR_RO(aliases);
8941 
8942 static ssize_t partial_show(struct kmem_cache *s, char *buf)
8943 {
8944 	return show_slab_objects(s, buf, SO_PARTIAL);
8945 }
8946 SLAB_ATTR_RO(partial);
8947 
8948 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
8949 {
8950 	return show_slab_objects(s, buf, SO_CPU);
8951 }
8952 SLAB_ATTR_RO(cpu_slabs);
8953 
8954 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
8955 {
8956 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
8957 }
8958 SLAB_ATTR_RO(objects_partial);
8959 
8960 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
8961 {
8962 	return sysfs_emit(buf, "0(0)\n");
8963 }
8964 SLAB_ATTR_RO(slabs_cpu_partial);
8965 
8966 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
8967 {
8968 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
8969 }
8970 SLAB_ATTR_RO(reclaim_account);
8971 
8972 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
8973 {
8974 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
8975 }
8976 SLAB_ATTR_RO(hwcache_align);
8977 
8978 #ifdef CONFIG_ZONE_DMA
8979 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
8980 {
8981 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
8982 }
8983 SLAB_ATTR_RO(cache_dma);
8984 #endif
8985 
8986 #ifdef CONFIG_HARDENED_USERCOPY
8987 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
8988 {
8989 	return sysfs_emit(buf, "%u\n", s->usersize);
8990 }
8991 SLAB_ATTR_RO(usersize);
8992 #endif
8993 
8994 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
8995 {
8996 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
8997 }
8998 SLAB_ATTR_RO(destroy_by_rcu);
8999 
9000 #ifdef CONFIG_SLUB_DEBUG
9001 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
9002 {
9003 	return show_slab_objects(s, buf, SO_ALL);
9004 }
9005 SLAB_ATTR_RO(slabs);
9006 
9007 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
9008 {
9009 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
9010 }
9011 SLAB_ATTR_RO(total_objects);
9012 
9013 static ssize_t objects_show(struct kmem_cache *s, char *buf)
9014 {
9015 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
9016 }
9017 SLAB_ATTR_RO(objects);
9018 
9019 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
9020 {
9021 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
9022 }
9023 SLAB_ATTR_RO(sanity_checks);
9024 
9025 static ssize_t trace_show(struct kmem_cache *s, char *buf)
9026 {
9027 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
9028 }
9029 SLAB_ATTR_RO(trace);
9030 
9031 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
9032 {
9033 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
9034 }
9035 
9036 SLAB_ATTR_RO(red_zone);
9037 
9038 static ssize_t poison_show(struct kmem_cache *s, char *buf)
9039 {
9040 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
9041 }
9042 
9043 SLAB_ATTR_RO(poison);
9044 
9045 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
9046 {
9047 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
9048 }
9049 
9050 SLAB_ATTR_RO(store_user);
9051 
9052 static ssize_t validate_show(struct kmem_cache *s, char *buf)
9053 {
9054 	return 0;
9055 }
9056 
9057 static ssize_t validate_store(struct kmem_cache *s,
9058 			const char *buf, size_t length)
9059 {
9060 	int ret = -EINVAL;
9061 
9062 	if (buf[0] == '1' && kmem_cache_debug(s)) {
9063 		ret = validate_slab_cache(s);
9064 		if (ret >= 0)
9065 			ret = length;
9066 	}
9067 	return ret;
9068 }
9069 SLAB_ATTR(validate);
9070 
9071 #endif /* CONFIG_SLUB_DEBUG */
9072 
9073 #ifdef CONFIG_FAILSLAB
9074 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
9075 {
9076 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
9077 }
9078 
9079 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
9080 				size_t length)
9081 {
9082 	if (s->refcount > 1)
9083 		return -EINVAL;
9084 
9085 	if (buf[0] == '1')
9086 		WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
9087 	else
9088 		WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
9089 
9090 	return length;
9091 }
9092 SLAB_ATTR(failslab);
9093 #endif
9094 
9095 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
9096 {
9097 	return 0;
9098 }
9099 
9100 static ssize_t shrink_store(struct kmem_cache *s,
9101 			const char *buf, size_t length)
9102 {
9103 	if (buf[0] == '1')
9104 		kmem_cache_shrink(s);
9105 	else
9106 		return -EINVAL;
9107 	return length;
9108 }
9109 SLAB_ATTR(shrink);
9110 
9111 #ifdef CONFIG_NUMA
9112 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
9113 {
9114 	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
9115 }
9116 
9117 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
9118 				const char *buf, size_t length)
9119 {
9120 	unsigned int ratio;
9121 	int err;
9122 
9123 	err = kstrtouint(buf, 10, &ratio);
9124 	if (err)
9125 		return err;
9126 	if (ratio > 100)
9127 		return -ERANGE;
9128 
9129 	s->remote_node_defrag_ratio = ratio * 10;
9130 
9131 	return length;
9132 }
9133 SLAB_ATTR(remote_node_defrag_ratio);
9134 #endif
9135 
9136 #ifdef CONFIG_SLUB_STATS
9137 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
9138 {
9139 	unsigned long sum  = 0;
9140 	int cpu;
9141 	int len = 0;
9142 	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
9143 
9144 	if (!data)
9145 		return -ENOMEM;
9146 
9147 	for_each_online_cpu(cpu) {
9148 		unsigned int x = per_cpu_ptr(s->cpu_stats, cpu)->stat[si];
9149 
9150 		data[cpu] = x;
9151 		sum += x;
9152 	}
9153 
9154 	len += sysfs_emit_at(buf, len, "%lu", sum);
9155 
9156 #ifdef CONFIG_SMP
9157 	for_each_online_cpu(cpu) {
9158 		if (data[cpu])
9159 			len += sysfs_emit_at(buf, len, " C%d=%u",
9160 					     cpu, data[cpu]);
9161 	}
9162 #endif
9163 	kfree(data);
9164 	len += sysfs_emit_at(buf, len, "\n");
9165 
9166 	return len;
9167 }
9168 
9169 static void clear_stat(struct kmem_cache *s, enum stat_item si)
9170 {
9171 	int cpu;
9172 
9173 	for_each_online_cpu(cpu)
9174 		per_cpu_ptr(s->cpu_stats, cpu)->stat[si] = 0;
9175 }
9176 
9177 #define STAT_ATTR(si, text) 					\
9178 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
9179 {								\
9180 	return show_stat(s, buf, si);				\
9181 }								\
9182 static ssize_t text##_store(struct kmem_cache *s,		\
9183 				const char *buf, size_t length)	\
9184 {								\
9185 	if (buf[0] != '0')					\
9186 		return -EINVAL;					\
9187 	clear_stat(s, si);					\
9188 	return length;						\
9189 }								\
9190 SLAB_ATTR(text);						\
9191 
9192 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
9193 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
9194 STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf);
9195 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
9196 STAT_ATTR(FREE_FASTPATH, free_fastpath);
9197 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
9198 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
9199 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
9200 STAT_ATTR(ALLOC_SLAB, alloc_slab);
9201 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
9202 STAT_ATTR(FREE_SLAB, free_slab);
9203 STAT_ATTR(ORDER_FALLBACK, order_fallback);
9204 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
9205 STAT_ATTR(SHEAF_FLUSH, sheaf_flush);
9206 STAT_ATTR(SHEAF_REFILL, sheaf_refill);
9207 STAT_ATTR(SHEAF_ALLOC, sheaf_alloc);
9208 STAT_ATTR(SHEAF_FREE, sheaf_free);
9209 STAT_ATTR(BARN_GET, barn_get);
9210 STAT_ATTR(BARN_GET_FAIL, barn_get_fail);
9211 STAT_ATTR(BARN_PUT, barn_put);
9212 STAT_ATTR(BARN_PUT_FAIL, barn_put_fail);
9213 STAT_ATTR(SHEAF_PREFILL_FAST, sheaf_prefill_fast);
9214 STAT_ATTR(SHEAF_PREFILL_SLOW, sheaf_prefill_slow);
9215 STAT_ATTR(SHEAF_PREFILL_OVERSIZE, sheaf_prefill_oversize);
9216 STAT_ATTR(SHEAF_RETURN_FAST, sheaf_return_fast);
9217 STAT_ATTR(SHEAF_RETURN_SLOW, sheaf_return_slow);
9218 #endif	/* CONFIG_SLUB_STATS */
9219 
9220 #ifdef CONFIG_KFENCE
9221 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
9222 {
9223 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
9224 }
9225 
9226 static ssize_t skip_kfence_store(struct kmem_cache *s,
9227 			const char *buf, size_t length)
9228 {
9229 	int ret = length;
9230 
9231 	if (buf[0] == '0')
9232 		s->flags &= ~SLAB_SKIP_KFENCE;
9233 	else if (buf[0] == '1')
9234 		s->flags |= SLAB_SKIP_KFENCE;
9235 	else
9236 		ret = -EINVAL;
9237 
9238 	return ret;
9239 }
9240 SLAB_ATTR(skip_kfence);
9241 #endif
9242 
9243 static struct attribute *slab_attrs[] = {
9244 	&slab_size_attr.attr,
9245 	&object_size_attr.attr,
9246 	&objs_per_slab_attr.attr,
9247 	&order_attr.attr,
9248 	&sheaf_capacity_attr.attr,
9249 	&min_partial_attr.attr,
9250 	&cpu_partial_attr.attr,
9251 	&objects_partial_attr.attr,
9252 	&partial_attr.attr,
9253 	&cpu_slabs_attr.attr,
9254 	&ctor_attr.attr,
9255 	&aliases_attr.attr,
9256 	&align_attr.attr,
9257 	&hwcache_align_attr.attr,
9258 	&reclaim_account_attr.attr,
9259 	&destroy_by_rcu_attr.attr,
9260 	&shrink_attr.attr,
9261 	&slabs_cpu_partial_attr.attr,
9262 #ifdef CONFIG_SLUB_DEBUG
9263 	&total_objects_attr.attr,
9264 	&objects_attr.attr,
9265 	&slabs_attr.attr,
9266 	&sanity_checks_attr.attr,
9267 	&trace_attr.attr,
9268 	&red_zone_attr.attr,
9269 	&poison_attr.attr,
9270 	&store_user_attr.attr,
9271 	&validate_attr.attr,
9272 #endif
9273 #ifdef CONFIG_ZONE_DMA
9274 	&cache_dma_attr.attr,
9275 #endif
9276 #ifdef CONFIG_NUMA
9277 	&remote_node_defrag_ratio_attr.attr,
9278 #endif
9279 #ifdef CONFIG_SLUB_STATS
9280 	&alloc_fastpath_attr.attr,
9281 	&alloc_slowpath_attr.attr,
9282 	&free_rcu_sheaf_attr.attr,
9283 	&free_rcu_sheaf_fail_attr.attr,
9284 	&free_fastpath_attr.attr,
9285 	&free_slowpath_attr.attr,
9286 	&free_add_partial_attr.attr,
9287 	&free_remove_partial_attr.attr,
9288 	&alloc_slab_attr.attr,
9289 	&alloc_node_mismatch_attr.attr,
9290 	&free_slab_attr.attr,
9291 	&order_fallback_attr.attr,
9292 	&cmpxchg_double_fail_attr.attr,
9293 	&sheaf_flush_attr.attr,
9294 	&sheaf_refill_attr.attr,
9295 	&sheaf_alloc_attr.attr,
9296 	&sheaf_free_attr.attr,
9297 	&barn_get_attr.attr,
9298 	&barn_get_fail_attr.attr,
9299 	&barn_put_attr.attr,
9300 	&barn_put_fail_attr.attr,
9301 	&sheaf_prefill_fast_attr.attr,
9302 	&sheaf_prefill_slow_attr.attr,
9303 	&sheaf_prefill_oversize_attr.attr,
9304 	&sheaf_return_fast_attr.attr,
9305 	&sheaf_return_slow_attr.attr,
9306 #endif
9307 #ifdef CONFIG_FAILSLAB
9308 	&failslab_attr.attr,
9309 #endif
9310 #ifdef CONFIG_HARDENED_USERCOPY
9311 	&usersize_attr.attr,
9312 #endif
9313 #ifdef CONFIG_KFENCE
9314 	&skip_kfence_attr.attr,
9315 #endif
9316 
9317 	NULL
9318 };
9319 
9320 static const struct attribute_group slab_attr_group = {
9321 	.attrs = slab_attrs,
9322 };
9323 
9324 static ssize_t slab_attr_show(struct kobject *kobj,
9325 				struct attribute *attr,
9326 				char *buf)
9327 {
9328 	struct slab_attribute *attribute;
9329 	struct kmem_cache *s;
9330 
9331 	attribute = to_slab_attr(attr);
9332 	s = to_slab(kobj);
9333 
9334 	if (!attribute->show)
9335 		return -EIO;
9336 
9337 	return attribute->show(s, buf);
9338 }
9339 
9340 static ssize_t slab_attr_store(struct kobject *kobj,
9341 				struct attribute *attr,
9342 				const char *buf, size_t len)
9343 {
9344 	struct slab_attribute *attribute;
9345 	struct kmem_cache *s;
9346 
9347 	attribute = to_slab_attr(attr);
9348 	s = to_slab(kobj);
9349 
9350 	if (!attribute->store)
9351 		return -EIO;
9352 
9353 	return attribute->store(s, buf, len);
9354 }
9355 
9356 static void kmem_cache_release(struct kobject *k)
9357 {
9358 	slab_kmem_cache_release(to_slab(k));
9359 }
9360 
9361 static const struct sysfs_ops slab_sysfs_ops = {
9362 	.show = slab_attr_show,
9363 	.store = slab_attr_store,
9364 };
9365 
9366 static const struct kobj_type slab_ktype = {
9367 	.sysfs_ops = &slab_sysfs_ops,
9368 	.release = kmem_cache_release,
9369 };
9370 
9371 static struct kset *slab_kset;
9372 
9373 static inline struct kset *cache_kset(struct kmem_cache *s)
9374 {
9375 	return slab_kset;
9376 }
9377 
9378 #define ID_STR_LENGTH 32
9379 
9380 /* Create a unique string id for a slab cache:
9381  *
9382  * Format	:[flags-]size
9383  */
9384 static char *create_unique_id(struct kmem_cache *s)
9385 {
9386 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
9387 	char *p = name;
9388 
9389 	if (!name)
9390 		return ERR_PTR(-ENOMEM);
9391 
9392 	*p++ = ':';
9393 	/*
9394 	 * First flags affecting slabcache operations. We will only
9395 	 * get here for aliasable slabs so we do not need to support
9396 	 * too many flags. The flags here must cover all flags that
9397 	 * are matched during merging to guarantee that the id is
9398 	 * unique.
9399 	 */
9400 	if (s->flags & SLAB_CACHE_DMA)
9401 		*p++ = 'd';
9402 	if (s->flags & SLAB_CACHE_DMA32)
9403 		*p++ = 'D';
9404 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
9405 		*p++ = 'a';
9406 	if (s->flags & SLAB_CONSISTENCY_CHECKS)
9407 		*p++ = 'F';
9408 	if (s->flags & SLAB_ACCOUNT)
9409 		*p++ = 'A';
9410 	if (p != name + 1)
9411 		*p++ = '-';
9412 	p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
9413 
9414 	if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
9415 		kfree(name);
9416 		return ERR_PTR(-EINVAL);
9417 	}
9418 	kmsan_unpoison_memory(name, p - name);
9419 	return name;
9420 }
9421 
9422 static int sysfs_slab_add(struct kmem_cache *s)
9423 {
9424 	int err;
9425 	const char *name;
9426 	struct kset *kset = cache_kset(s);
9427 	int unmergeable = slab_unmergeable(s);
9428 
9429 	if (!unmergeable && disable_higher_order_debug &&
9430 			(slub_debug & DEBUG_METADATA_FLAGS))
9431 		unmergeable = 1;
9432 
9433 	if (unmergeable) {
9434 		/*
9435 		 * Slabcache can never be merged so we can use the name proper.
9436 		 * This is typically the case for debug situations. In that
9437 		 * case we can catch duplicate names easily.
9438 		 */
9439 		sysfs_remove_link(&slab_kset->kobj, s->name);
9440 		name = s->name;
9441 	} else {
9442 		/*
9443 		 * Create a unique name for the slab as a target
9444 		 * for the symlinks.
9445 		 */
9446 		name = create_unique_id(s);
9447 		if (IS_ERR(name))
9448 			return PTR_ERR(name);
9449 	}
9450 
9451 	s->kobj.kset = kset;
9452 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
9453 	if (err)
9454 		goto out;
9455 
9456 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
9457 	if (err)
9458 		goto out_del_kobj;
9459 
9460 	if (!unmergeable) {
9461 		/* Setup first alias */
9462 		sysfs_slab_alias(s, s->name);
9463 	}
9464 out:
9465 	if (!unmergeable)
9466 		kfree(name);
9467 	return err;
9468 out_del_kobj:
9469 	kobject_del(&s->kobj);
9470 	goto out;
9471 }
9472 
9473 void sysfs_slab_unlink(struct kmem_cache *s)
9474 {
9475 	if (s->kobj.state_in_sysfs)
9476 		kobject_del(&s->kobj);
9477 }
9478 
9479 void sysfs_slab_release(struct kmem_cache *s)
9480 {
9481 	kobject_put(&s->kobj);
9482 }
9483 
9484 /*
9485  * Need to buffer aliases during bootup until sysfs becomes
9486  * available lest we lose that information.
9487  */
9488 struct saved_alias {
9489 	struct kmem_cache *s;
9490 	const char *name;
9491 	struct saved_alias *next;
9492 };
9493 
9494 static struct saved_alias *alias_list;
9495 
9496 int sysfs_slab_alias(struct kmem_cache *s, const char *name)
9497 {
9498 	struct saved_alias *al;
9499 
9500 	if (slab_state == FULL) {
9501 		/*
9502 		 * If we have a leftover link then remove it.
9503 		 */
9504 		sysfs_remove_link(&slab_kset->kobj, name);
9505 		/*
9506 		 * The original cache may have failed to generate sysfs file.
9507 		 * In that case, sysfs_create_link() returns -ENOENT and
9508 		 * symbolic link creation is skipped.
9509 		 */
9510 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
9511 	}
9512 
9513 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
9514 	if (!al)
9515 		return -ENOMEM;
9516 
9517 	al->s = s;
9518 	al->name = name;
9519 	al->next = alias_list;
9520 	alias_list = al;
9521 	kmsan_unpoison_memory(al, sizeof(*al));
9522 	return 0;
9523 }
9524 
9525 static int __init slab_sysfs_init(void)
9526 {
9527 	struct kmem_cache *s;
9528 	int err;
9529 
9530 	mutex_lock(&slab_mutex);
9531 
9532 	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
9533 	if (!slab_kset) {
9534 		mutex_unlock(&slab_mutex);
9535 		pr_err("Cannot register slab subsystem.\n");
9536 		return -ENOMEM;
9537 	}
9538 
9539 	slab_state = FULL;
9540 
9541 	list_for_each_entry(s, &slab_caches, list) {
9542 		err = sysfs_slab_add(s);
9543 		if (err)
9544 			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
9545 			       s->name);
9546 	}
9547 
9548 	while (alias_list) {
9549 		struct saved_alias *al = alias_list;
9550 
9551 		alias_list = alias_list->next;
9552 		err = sysfs_slab_alias(al->s, al->name);
9553 		if (err)
9554 			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
9555 			       al->name);
9556 		kfree(al);
9557 	}
9558 
9559 	mutex_unlock(&slab_mutex);
9560 	return 0;
9561 }
9562 late_initcall(slab_sysfs_init);
9563 #endif /* SLAB_SUPPORTS_SYSFS */
9564 
9565 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
9566 static int slab_debugfs_show(struct seq_file *seq, void *v)
9567 {
9568 	struct loc_track *t = seq->private;
9569 	struct location *l;
9570 	unsigned long idx;
9571 
9572 	idx = (unsigned long) t->idx;
9573 	if (idx < t->count) {
9574 		l = &t->loc[idx];
9575 
9576 		seq_printf(seq, "%7ld ", l->count);
9577 
9578 		if (l->addr)
9579 			seq_printf(seq, "%pS", (void *)l->addr);
9580 		else
9581 			seq_puts(seq, "<not-available>");
9582 
9583 		if (l->waste)
9584 			seq_printf(seq, " waste=%lu/%lu",
9585 				l->count * l->waste, l->waste);
9586 
9587 		if (l->sum_time != l->min_time) {
9588 			seq_printf(seq, " age=%ld/%llu/%ld",
9589 				l->min_time, div_u64(l->sum_time, l->count),
9590 				l->max_time);
9591 		} else
9592 			seq_printf(seq, " age=%ld", l->min_time);
9593 
9594 		if (l->min_pid != l->max_pid)
9595 			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
9596 		else
9597 			seq_printf(seq, " pid=%ld",
9598 				l->min_pid);
9599 
9600 		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
9601 			seq_printf(seq, " cpus=%*pbl",
9602 				 cpumask_pr_args(to_cpumask(l->cpus)));
9603 
9604 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
9605 			seq_printf(seq, " nodes=%*pbl",
9606 				 nodemask_pr_args(&l->nodes));
9607 
9608 #ifdef CONFIG_STACKDEPOT
9609 		{
9610 			depot_stack_handle_t handle;
9611 			unsigned long *entries;
9612 			unsigned int nr_entries, j;
9613 
9614 			handle = READ_ONCE(l->handle);
9615 			if (handle) {
9616 				nr_entries = stack_depot_fetch(handle, &entries);
9617 				seq_puts(seq, "\n");
9618 				for (j = 0; j < nr_entries; j++)
9619 					seq_printf(seq, "        %pS\n", (void *)entries[j]);
9620 			}
9621 		}
9622 #endif
9623 		seq_puts(seq, "\n");
9624 	}
9625 
9626 	if (!idx && !t->count)
9627 		seq_puts(seq, "No data\n");
9628 
9629 	return 0;
9630 }
9631 
9632 static void slab_debugfs_stop(struct seq_file *seq, void *v)
9633 {
9634 }
9635 
9636 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
9637 {
9638 	struct loc_track *t = seq->private;
9639 
9640 	t->idx = ++(*ppos);
9641 	if (*ppos <= t->count)
9642 		return ppos;
9643 
9644 	return NULL;
9645 }
9646 
9647 static int cmp_loc_by_count(const void *a, const void *b)
9648 {
9649 	struct location *loc1 = (struct location *)a;
9650 	struct location *loc2 = (struct location *)b;
9651 
9652 	return cmp_int(loc2->count, loc1->count);
9653 }
9654 
9655 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
9656 {
9657 	struct loc_track *t = seq->private;
9658 
9659 	t->idx = *ppos;
9660 	return ppos;
9661 }
9662 
9663 static const struct seq_operations slab_debugfs_sops = {
9664 	.start  = slab_debugfs_start,
9665 	.next   = slab_debugfs_next,
9666 	.stop   = slab_debugfs_stop,
9667 	.show   = slab_debugfs_show,
9668 };
9669 
9670 static int slab_debug_trace_open(struct inode *inode, struct file *filep)
9671 {
9672 
9673 	struct kmem_cache_node *n;
9674 	enum track_item alloc;
9675 	int node;
9676 	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
9677 						sizeof(struct loc_track));
9678 	struct kmem_cache *s = file_inode(filep)->i_private;
9679 	unsigned long *obj_map;
9680 
9681 	if (!t)
9682 		return -ENOMEM;
9683 
9684 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
9685 	if (!obj_map) {
9686 		seq_release_private(inode, filep);
9687 		return -ENOMEM;
9688 	}
9689 
9690 	alloc = debugfs_get_aux_num(filep);
9691 
9692 	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
9693 		bitmap_free(obj_map);
9694 		seq_release_private(inode, filep);
9695 		return -ENOMEM;
9696 	}
9697 
9698 	for_each_kmem_cache_node(s, node, n) {
9699 		unsigned long flags;
9700 		struct slab *slab;
9701 
9702 		if (!node_nr_slabs(n))
9703 			continue;
9704 
9705 		spin_lock_irqsave(&n->list_lock, flags);
9706 		list_for_each_entry(slab, &n->partial, slab_list)
9707 			process_slab(t, s, slab, alloc, obj_map);
9708 		list_for_each_entry(slab, &n->full, slab_list)
9709 			process_slab(t, s, slab, alloc, obj_map);
9710 		spin_unlock_irqrestore(&n->list_lock, flags);
9711 	}
9712 
9713 	/* Sort locations by count */
9714 	sort(t->loc, t->count, sizeof(struct location),
9715 	     cmp_loc_by_count, NULL);
9716 
9717 	bitmap_free(obj_map);
9718 	return 0;
9719 }
9720 
9721 static int slab_debug_trace_release(struct inode *inode, struct file *file)
9722 {
9723 	struct seq_file *seq = file->private_data;
9724 	struct loc_track *t = seq->private;
9725 
9726 	free_loc_track(t);
9727 	return seq_release_private(inode, file);
9728 }
9729 
9730 static const struct file_operations slab_debugfs_fops = {
9731 	.open    = slab_debug_trace_open,
9732 	.read    = seq_read,
9733 	.llseek  = seq_lseek,
9734 	.release = slab_debug_trace_release,
9735 };
9736 
9737 static void debugfs_slab_add(struct kmem_cache *s)
9738 {
9739 	struct dentry *slab_cache_dir;
9740 
9741 	if (unlikely(!slab_debugfs_root))
9742 		return;
9743 
9744 	slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
9745 
9746 	debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s,
9747 					TRACK_ALLOC, &slab_debugfs_fops);
9748 
9749 	debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s,
9750 					TRACK_FREE, &slab_debugfs_fops);
9751 }
9752 
9753 void debugfs_slab_release(struct kmem_cache *s)
9754 {
9755 	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
9756 }
9757 
9758 static int __init slab_debugfs_init(void)
9759 {
9760 	struct kmem_cache *s;
9761 
9762 	slab_debugfs_root = debugfs_create_dir("slab", NULL);
9763 
9764 	list_for_each_entry(s, &slab_caches, list)
9765 		if (s->flags & SLAB_STORE_USER)
9766 			debugfs_slab_add(s);
9767 
9768 	return 0;
9769 
9770 }
9771 __initcall(slab_debugfs_init);
9772 #endif
9773 /*
9774  * The /proc/slabinfo ABI
9775  */
9776 #ifdef CONFIG_SLUB_DEBUG
9777 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
9778 {
9779 	unsigned long nr_slabs = 0;
9780 	unsigned long nr_objs = 0;
9781 	unsigned long nr_free = 0;
9782 	int node;
9783 	struct kmem_cache_node *n;
9784 
9785 	for_each_kmem_cache_node(s, node, n) {
9786 		nr_slabs += node_nr_slabs(n);
9787 		nr_objs += node_nr_objs(n);
9788 		nr_free += count_partial_free_approx(n);
9789 	}
9790 
9791 	sinfo->active_objs = nr_objs - nr_free;
9792 	sinfo->num_objs = nr_objs;
9793 	sinfo->active_slabs = nr_slabs;
9794 	sinfo->num_slabs = nr_slabs;
9795 	sinfo->objects_per_slab = oo_objects(s->oo);
9796 	sinfo->cache_order = oo_order(s->oo);
9797 }
9798 #endif /* CONFIG_SLUB_DEBUG */
9799