xref: /linux/mm/slub.c (revision 464b1c115852fe025635ae2065e00caced184d92)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SLUB: A slab allocator with low overhead percpu array caches and mostly
4  * lockless freeing of objects to slabs in the slowpath.
5  *
6  * The allocator synchronizes using spin_trylock for percpu arrays in the
7  * fastpath, and cmpxchg_double (or bit spinlock) for slowpath freeing.
8  * Uses a centralized lock to manage a pool of partial slabs.
9  *
10  * (C) 2007 SGI, Christoph Lameter
11  * (C) 2011 Linux Foundation, Christoph Lameter
12  * (C) 2025 SUSE, Vlastimil Babka
13  */
14 
15 #include <linux/mm.h>
16 #include <linux/swap.h> /* mm_account_reclaimed_pages() */
17 #include <linux/module.h>
18 #include <linux/bit_spinlock.h>
19 #include <linux/interrupt.h>
20 #include <linux/swab.h>
21 #include <linux/bitops.h>
22 #include <linux/slab.h>
23 #include "slab.h"
24 #include <linux/vmalloc.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kasan.h>
28 #include <linux/node.h>
29 #include <linux/kmsan.h>
30 #include <linux/cpu.h>
31 #include <linux/cpuset.h>
32 #include <linux/mempolicy.h>
33 #include <linux/ctype.h>
34 #include <linux/stackdepot.h>
35 #include <linux/debugobjects.h>
36 #include <linux/kallsyms.h>
37 #include <linux/kfence.h>
38 #include <linux/memory.h>
39 #include <linux/math64.h>
40 #include <linux/fault-inject.h>
41 #include <linux/kmemleak.h>
42 #include <linux/stacktrace.h>
43 #include <linux/prefetch.h>
44 #include <linux/memcontrol.h>
45 #include <linux/random.h>
46 #include <linux/prandom.h>
47 #include <kunit/test.h>
48 #include <kunit/test-bug.h>
49 #include <linux/sort.h>
50 #include <linux/irq_work.h>
51 #include <linux/kprobes.h>
52 #include <linux/debugfs.h>
53 #include <trace/events/kmem.h>
54 
55 #include "internal.h"
56 
57 /*
58  * Lock order:
59  *   0.  cpu_hotplug_lock
60  *   1.  slab_mutex (Global Mutex)
61  *   2a. kmem_cache->cpu_sheaves->lock (Local trylock)
62  *   2b. node->barn->lock (Spinlock)
63  *   2c. node->list_lock (Spinlock)
64  *   3.  slab_lock(slab) (Only on some arches)
65  *   4.  object_map_lock (Only for debugging)
66  *
67  *   slab_mutex
68  *
69  *   The role of the slab_mutex is to protect the list of all the slabs
70  *   and to synchronize major metadata changes to slab cache structures.
71  *   Also synchronizes memory hotplug callbacks.
72  *
73  *   slab_lock
74  *
75  *   The slab_lock is a wrapper around the page lock, thus it is a bit
76  *   spinlock.
77  *
78  *   The slab_lock is only used on arches that do not have the ability
79  *   to do a cmpxchg_double. It only protects:
80  *
81  *	A. slab->freelist	-> List of free objects in a slab
82  *	B. slab->inuse		-> Number of objects in use
83  *	C. slab->objects	-> Number of objects in slab
84  *	D. slab->frozen		-> frozen state
85  *
86  *   SL_partial slabs
87  *
88  *   Slabs on node partial list have at least one free object. A limited number
89  *   of slabs on the list can be fully free (slab->inuse == 0), until we start
90  *   discarding them. These slabs are marked with SL_partial, and the flag is
91  *   cleared while removing them, usually to grab their freelist afterwards.
92  *   This clearing also exempts them from list management. Please see
93  *   __slab_free() for more details.
94  *
95  *   Full slabs
96  *
97  *   For caches without debugging enabled, full slabs (slab->inuse ==
98  *   slab->objects and slab->freelist == NULL) are not placed on any list.
99  *   The __slab_free() freeing the first object from such a slab will place
100  *   it on the partial list. Caches with debugging enabled place such slab
101  *   on the full list and use different allocation and freeing paths.
102  *
103  *   Frozen slabs
104  *
105  *   If a slab is frozen then it is exempt from list management. It is used to
106  *   indicate a slab that has failed consistency checks and thus cannot be
107  *   allocated from anymore - it is also marked as full. Any previously
108  *   allocated objects will be simply leaked upon freeing instead of attempting
109  *   to modify the potentially corrupted freelist and metadata.
110  *
111  *   To sum up, the current scheme is:
112  *   - node partial slab:            SL_partial && !full && !frozen
113  *   - taken off partial list:      !SL_partial && !full && !frozen
114  *   - full slab, not on any list:  !SL_partial &&  full && !frozen
115  *   - frozen due to inconsistency: !SL_partial &&  full &&  frozen
116  *
117  *   node->list_lock (spinlock)
118  *
119  *   The list_lock protects the partial and full list on each node and
120  *   the partial slab counter. If taken then no new slabs may be added to or
121  *   removed from the lists, nor may the number of partial slabs be modified.
122  *   (Note that the total number of slabs is an atomic value that may be
123  *   modified without taking the list lock).
124  *
125  *   The list_lock is a centralized lock and thus we avoid taking it as
126  *   much as possible. As long as SLUB does not have to handle partial
127  *   slabs, operations can continue without any centralized lock.
128  *
129  *   For debug caches, all allocations are forced to go through a list_lock
130  *   protected region to serialize against concurrent validation.
131  *
132  *   cpu_sheaves->lock (local_trylock)
133  *
134  *   This lock protects fastpath operations on the percpu sheaves. On !RT it
135  *   only disables preemption and does no atomic operations. As long as the main
136  *   or spare sheaf can handle the allocation or free, there is no other
137  *   overhead.
138  *
139  *   node->barn->lock (spinlock)
140  *
141  *   This lock protects operations on the per-NUMA-node barn. It can quickly
142  *   serve an empty or full sheaf if one is available, avoiding a more expensive
143  *   refill or flush operation.
144  *
145  *   Lockless freeing
146  *
147  *   Objects may have to be freed to their slabs when they are from a remote
148  *   node (where we want to avoid filling local sheaves with remote objects)
149  *   or when there are too many full sheaves. On architectures supporting
150  *   cmpxchg_double this is done by a lockless update of slab's freelist and
151  *   counters, otherwise slab_lock is taken. This only needs to take the
152  *   list_lock for the first free to a full slab, or when a slab becomes empty
153  *   after the free.
154  *
155  *   irq, preemption, migration considerations
156  *
157  *   Interrupts are disabled as part of list_lock or barn lock operations, or
158  *   around the slab_lock operation, in order to make the slab allocator safe
159  *   to use in the context of an irq.
160  *   Preemption is disabled as part of local_trylock operations.
161  *   kmalloc_nolock() and kfree_nolock() are safe in NMI context but see
162  *   their limitations.
163  *
164  * SLUB maintains two per-cpu object arrays called sheaves for caching allocations
165  * and frees, with a shared per-NUMA-node barn for balancing between cpus.
166  * Allocations and frees are primarily served from these sheaves.
167  *
168  * Slabs with free elements are kept on a partial list and during regular
169  * operations no list for full slabs is used. If an object in a full slab is
170  * freed then the slab will show up again on the partial lists.
171  * We track full slabs for debugging purposes though because otherwise we
172  * cannot scan all objects.
173  *
174  * Slabs are freed when they become empty. Teardown and setup is minimal so we
175  * rely on the page allocator's per-cpu caches for fast frees and allocs.
176  *
177  * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
178  * 			options set. This moves	slab handling out of
179  * 			the fast path and disables lockless freelists.
180  */
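
/*
 * Illustrative sketch (not actual code; the real paths live further down in
 * this file): the two sheaf-based fast paths described above roughly follow
 *
 *	alloc:	local_trylock(&cpu_sheaves->lock)
 *		  -> pop an object from the main sheaf
 *		  -> else swap in the spare sheaf if it has objects
 *		  -> else get a full sheaf from the node barn (barn->lock)
 *		  -> else slowpath: refill from partial or new slabs (list_lock)
 *
 *	free:	local_trylock(&cpu_sheaves->lock)
 *		  -> push the object to the main sheaf
 *		  -> else use the spare sheaf if it has room
 *		  -> else put the full main sheaf to the barn (barn->lock)
 *		  -> else slowpath: free directly to the slab (lockless
 *		     cmpxchg_double or slab_lock, possibly list_lock)
 */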
181 
182 /**
183  * enum slab_flags - How the slab flags bits are used.
184  * @SL_locked: Is locked with slab_lock()
185  * @SL_partial: On the per-node partial list
186  * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
187  *
188  * The slab flags share space with the page flags but some bits have
189  * different interpretations.  The high bits are used for information
190  * like zone/node/section.
191  */
192 enum slab_flags {
193 	SL_locked = PG_locked,
194 	SL_partial = PG_workingset,	/* Historical reasons for this bit */
195 	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
196 };
197 
198 #ifndef CONFIG_SLUB_TINY
199 #define __fastpath_inline __always_inline
200 #else
201 #define __fastpath_inline
202 #endif
203 
204 #ifdef CONFIG_SLUB_DEBUG
205 #ifdef CONFIG_SLUB_DEBUG_ON
206 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
207 #else
208 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
209 #endif
210 #endif		/* CONFIG_SLUB_DEBUG */
211 
212 #ifdef CONFIG_NUMA
213 static DEFINE_STATIC_KEY_FALSE(strict_numa);
214 #endif
215 
216 /* Structure holding parameters for get_from_partial() call chain */
217 struct partial_context {
218 	gfp_t flags;
219 	unsigned int orig_size;
220 };
221 
222 /* Structure holding parameters for get_partial_node_bulk() */
223 struct partial_bulk_context {
224 	gfp_t flags;
225 	unsigned int min_objects;
226 	unsigned int max_objects;
227 	struct list_head slabs;
228 };
229 
230 static inline bool kmem_cache_debug(struct kmem_cache *s)
231 {
232 	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
233 }
234 
235 void *fixup_red_left(struct kmem_cache *s, void *p)
236 {
237 	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
238 		p += s->red_left_pad;
239 
240 	return p;
241 }
242 
243 /*
244  * Issues still to be resolved:
245  *
246  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
247  *
248  * - Variable sizing of the per node arrays
249  */
250 
251 /* Enable to log cmpxchg failures */
252 #undef SLUB_DEBUG_CMPXCHG
253 
254 #ifndef CONFIG_SLUB_TINY
255 /*
256  * Minimum number of partial slabs. These will be left on the partial
257  * lists even if they are empty. kmem_cache_shrink may reclaim them.
258  */
259 #define MIN_PARTIAL 5
260 
261 /*
262  * Maximum number of desirable partial slabs.
263  * The existence of more partial slabs makes kmem_cache_shrink
264  * sort the partial list by the number of objects in use.
265  */
266 #define MAX_PARTIAL 10
267 #else
268 #define MIN_PARTIAL 0
269 #define MAX_PARTIAL 0
270 #endif
271 
272 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
273 				SLAB_POISON | SLAB_STORE_USER)
274 
275 /*
276  * These debug flags cannot use CMPXCHG because there might be consistency
277  * issues when checking or reading debug information
278  */
279 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
280 				SLAB_TRACE)
281 
282 
283 /*
284  * Debugging flags that require metadata to be stored in the slab.  These get
285  * disabled when slab_debug=O is used and a cache's min order increases with
286  * metadata.
287  */
288 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
289 
290 #define OO_SHIFT	16
291 #define OO_MASK		((1 << OO_SHIFT) - 1)
292 #define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */
293 
294 /* Internal SLUB flags */
295 /* Poison object */
296 #define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
297 /* Use cmpxchg_double */
298 
299 #ifdef system_has_freelist_aba
300 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
301 #else
302 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
303 #endif
304 
305 /*
306  * Tracking user of a slab.
307  */
308 #define TRACK_ADDRS_COUNT 16
309 struct track {
310 	unsigned long addr;	/* Called from address */
311 #ifdef CONFIG_STACKDEPOT
312 	depot_stack_handle_t handle;
313 #endif
314 	int cpu;		/* Was running on cpu */
315 	int pid;		/* Pid context */
316 	unsigned long when;	/* When did the operation occur */
317 };
318 
319 enum track_item { TRACK_ALLOC, TRACK_FREE };
320 
321 #ifdef SLAB_SUPPORTS_SYSFS
322 static int sysfs_slab_add(struct kmem_cache *);
323 #else
324 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
325 #endif
326 
327 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
328 static void debugfs_slab_add(struct kmem_cache *);
329 #else
330 static inline void debugfs_slab_add(struct kmem_cache *s) { }
331 #endif
332 
333 enum add_mode {
334 	ADD_TO_HEAD,
335 	ADD_TO_TAIL,
336 };
337 
338 enum stat_item {
339 	ALLOC_FASTPATH,		/* Allocation from percpu sheaves */
340 	ALLOC_SLOWPATH,		/* Allocation from partial or new slab */
341 	FREE_RCU_SHEAF,		/* Free to rcu_free sheaf */
342 	FREE_RCU_SHEAF_FAIL,	/* Failed to free to a rcu_free sheaf */
343 	FREE_FASTPATH,		/* Free to percpu sheaves */
344 	FREE_SLOWPATH,		/* Free to a slab */
345 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
346 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
347 	ALLOC_SLAB,		/* New slab acquired from page allocator */
348 	ALLOC_NODE_MISMATCH,	/* Requested node different from cpu sheaf */
349 	FREE_SLAB,		/* Slab freed to the page allocator */
350 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
351 	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
352 	SHEAF_FLUSH,		/* Objects flushed from a sheaf */
353 	SHEAF_REFILL,		/* Objects refilled to a sheaf */
354 	SHEAF_ALLOC,		/* Allocation of an empty sheaf */
355 	SHEAF_FREE,		/* Freeing of an empty sheaf */
356 	BARN_GET,		/* Got full sheaf from barn */
357 	BARN_GET_FAIL,		/* Failed to get full sheaf from barn */
358 	BARN_PUT,		/* Put full sheaf to barn */
359 	BARN_PUT_FAIL,		/* Failed to put full sheaf to barn */
360 	SHEAF_PREFILL_FAST,	/* Sheaf prefill grabbed the spare sheaf */
361 	SHEAF_PREFILL_SLOW,	/* Sheaf prefill found no spare sheaf */
362 	SHEAF_PREFILL_OVERSIZE,	/* Allocation of oversize sheaf for prefill */
363 	SHEAF_RETURN_FAST,	/* Sheaf return reattached spare sheaf */
364 	SHEAF_RETURN_SLOW,	/* Sheaf return could not reattach spare */
365 	NR_SLUB_STAT_ITEMS
366 };
367 
368 #ifdef CONFIG_SLUB_STATS
369 struct kmem_cache_stats {
370 	unsigned int stat[NR_SLUB_STAT_ITEMS];
371 };
372 #endif
373 
374 static inline void stat(const struct kmem_cache *s, enum stat_item si)
375 {
376 #ifdef CONFIG_SLUB_STATS
377 	/*
378 	 * The rmw is racy on a preemptible kernel but this is acceptable, so
379 	 * avoid this_cpu_add()'s irq-disable overhead.
380 	 */
381 	raw_cpu_inc(s->cpu_stats->stat[si]);
382 #endif
383 }
384 
385 static inline
386 void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
387 {
388 #ifdef CONFIG_SLUB_STATS
389 	raw_cpu_add(s->cpu_stats->stat[si], v);
390 #endif
391 }
392 
393 #define MAX_FULL_SHEAVES	10
394 #define MAX_EMPTY_SHEAVES	10
395 
396 struct node_barn {
397 	spinlock_t lock;
398 	struct list_head sheaves_full;
399 	struct list_head sheaves_empty;
400 	unsigned int nr_full;
401 	unsigned int nr_empty;
402 };
403 
404 struct slab_sheaf {
405 	union {
406 		struct rcu_head rcu_head;
407 		struct list_head barn_list;
408 		/* only used for prefilled sheaves */
409 		struct {
410 			unsigned int capacity;
411 			bool pfmemalloc;
412 		};
413 	};
414 	struct kmem_cache *cache;
415 	unsigned int size;
416 	int node; /* only used for rcu_sheaf */
417 	void *objects[];
418 };
419 
420 struct slub_percpu_sheaves {
421 	local_trylock_t lock;
422 	struct slab_sheaf *main; /* never NULL when unlocked */
423 	struct slab_sheaf *spare; /* empty or full, may be NULL */
424 	struct slab_sheaf *rcu_free; /* for batching kfree_rcu() */
425 };
426 
427 /*
428  * The slab lists for all objects.
429  */
430 struct kmem_cache_node {
431 	spinlock_t list_lock;
432 	unsigned long nr_partial;
433 	struct list_head partial;
434 #ifdef CONFIG_SLUB_DEBUG
435 	atomic_long_t nr_slabs;
436 	atomic_long_t total_objects;
437 	struct list_head full;
438 #endif
439 	struct node_barn *barn;
440 };
441 
442 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
443 {
444 	return s->node[node];
445 }
446 
447 /*
448  * Get the barn of the current cpu's closest memory node. It may not exist on
449  * systems that have memoryless nodes but lack CONFIG_HAVE_MEMORYLESS_NODES.
450  */
451 static inline struct node_barn *get_barn(struct kmem_cache *s)
452 {
453 	struct kmem_cache_node *n = get_node(s, numa_mem_id());
454 
455 	if (!n)
456 		return NULL;
457 
458 	return n->barn;
459 }
460 
461 /*
462  * Iterator over all nodes. The body will be executed for each node that has
463  * a kmem_cache_node structure allocated (which is true for all online nodes)
464  */
465 #define for_each_kmem_cache_node(__s, __node, __n) \
466 	for (__node = 0; __node < nr_node_ids; __node++) \
467 		 if ((__n = get_node(__s, __node)))
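
/*
 * Usage sketch (illustrative): counting the partial slabs of a cache across
 * all nodes. The caller is assumed to hold slab_mutex or otherwise pin the
 * cache so that s->node[] does not change underneath us.
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */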
468 
469 /*
470  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
471  * Corresponds to node_state[N_MEMORY], but can temporarily
472  * differ during memory hotplug/hotremove operations.
473  * Protected by slab_mutex.
474  */
475 static nodemask_t slab_nodes;
476 
477 /*
478  * Workqueue used for flushing cpu and kfree_rcu sheaves.
479  */
480 static struct workqueue_struct *flushwq;
481 
482 struct slub_flush_work {
483 	struct work_struct work;
484 	struct kmem_cache *s;
485 	bool skip;
486 };
487 
488 static DEFINE_MUTEX(flush_lock);
489 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
490 
491 /********************************************************************
492  * 			Core slab cache functions
493  *******************************************************************/
494 
495 /*
496  * Returns freelist pointer (ptr). With hardening, this is obfuscated
497  * with an XOR of the address where the pointer is held and a per-cache
498  * random number.
499  */
500 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
501 					    void *ptr, unsigned long ptr_addr)
502 {
503 	unsigned long encoded;
504 
505 #ifdef CONFIG_SLAB_FREELIST_HARDENED
506 	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
507 #else
508 	encoded = (unsigned long)ptr;
509 #endif
510 	return (freeptr_t){.v = encoded};
511 }
512 
513 static inline void *freelist_ptr_decode(const struct kmem_cache *s,
514 					freeptr_t ptr, unsigned long ptr_addr)
515 {
516 	void *decoded;
517 
518 #ifdef CONFIG_SLAB_FREELIST_HARDENED
519 	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
520 #else
521 	decoded = (void *)ptr.v;
522 #endif
523 	return decoded;
524 }
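
/*
 * Illustrative round trip with CONFIG_SLAB_FREELIST_HARDENED: a free pointer
 * P stored at address A in a cache with per-cache random value s->random is
 * obfuscated and later recovered as
 *
 *	stored  = (unsigned long)P ^ s->random ^ swab(A);
 *	decoded = (void *)(stored ^ s->random ^ swab(A));	 == P
 *
 * XOR-ing twice with the same values cancels out, so decoding is exact, while
 * an attacker overwriting the stored value without knowing s->random cannot
 * aim the freelist at a chosen address. swab() mixes the predictable high
 * bits of the storage address into the low bits of the cookie.
 */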
525 
526 static inline void *get_freepointer(struct kmem_cache *s, void *object)
527 {
528 	unsigned long ptr_addr;
529 	freeptr_t p;
530 
531 	object = kasan_reset_tag(object);
532 	ptr_addr = (unsigned long)object + s->offset;
533 	p = *(freeptr_t *)(ptr_addr);
534 	return freelist_ptr_decode(s, p, ptr_addr);
535 }
536 
537 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
538 {
539 	unsigned long freeptr_addr = (unsigned long)object + s->offset;
540 
541 #ifdef CONFIG_SLAB_FREELIST_HARDENED
542 	BUG_ON(object == fp); /* naive detection of double free or corruption */
543 #endif
544 
545 	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
546 	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
547 }
548 
549 /*
550  * See comment in calculate_sizes().
551  */
552 static inline bool freeptr_outside_object(struct kmem_cache *s)
553 {
554 	return s->offset >= s->inuse;
555 }
556 
557 /*
558  * Return the offset of the end of the info block, which is inuse plus the
559  * free pointer if the free pointer does not overlap the object.
560  */
561 static inline unsigned int get_info_end(struct kmem_cache *s)
562 {
563 	if (freeptr_outside_object(s))
564 		return s->inuse + sizeof(void *);
565 	else
566 		return s->inuse;
567 }
568 
569 /* Loop over all objects in a slab */
570 #define for_each_object(__p, __s, __addr, __objects) \
571 	for (__p = fixup_red_left(__s, __addr); \
572 		__p < (__addr) + (__objects) * (__s)->size; \
573 		__p += (__s)->size)
574 
575 static inline unsigned int order_objects(unsigned int order, unsigned int size)
576 {
577 	return ((unsigned int)PAGE_SIZE << order) / size;
578 }
579 
580 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
581 		unsigned int size)
582 {
583 	struct kmem_cache_order_objects x = {
584 		(order << OO_SHIFT) + order_objects(order, size)
585 	};
586 
587 	return x;
588 }
589 
590 static inline unsigned int oo_order(struct kmem_cache_order_objects x)
591 {
592 	return x.x >> OO_SHIFT;
593 }
594 
595 static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
596 {
597 	return x.x & OO_MASK;
598 }
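
/*
 * Worked example (assuming PAGE_SIZE == 4096): a cache with s->size == 256
 * using order-1 slabs packs the order and object count into a single word:
 *
 *	order_objects(1, 256)	== (4096 << 1) / 256	== 32
 *	oo_make(1, 256).x	== (1 << OO_SHIFT) + 32	== 0x10020
 *	oo_order(oo)		== 0x10020 >> 16	== 1
 *	oo_objects(oo)		== 0x10020 & 0xffff	== 32
 */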
599 
600 /*
601  * If network-based swap is enabled, slub must keep track of whether memory
602  * was allocated from pfmemalloc reserves.
603  */
604 static inline bool slab_test_pfmemalloc(const struct slab *slab)
605 {
606 	return test_bit(SL_pfmemalloc, &slab->flags.f);
607 }
608 
609 static inline void slab_set_pfmemalloc(struct slab *slab)
610 {
611 	set_bit(SL_pfmemalloc, &slab->flags.f);
612 }
613 
614 static inline void __slab_clear_pfmemalloc(struct slab *slab)
615 {
616 	__clear_bit(SL_pfmemalloc, &slab->flags.f);
617 }
618 
619 /*
620  * Per slab locking using the pagelock
621  */
622 static __always_inline void slab_lock(struct slab *slab)
623 {
624 	bit_spin_lock(SL_locked, &slab->flags.f);
625 }
626 
627 static __always_inline void slab_unlock(struct slab *slab)
628 {
629 	bit_spin_unlock(SL_locked, &slab->flags.f);
630 }
631 
632 static inline bool
633 __update_freelist_fast(struct slab *slab, struct freelist_counters *old,
634 		       struct freelist_counters *new)
635 {
636 #ifdef system_has_freelist_aba
637 	return try_cmpxchg_freelist(&slab->freelist_counters,
638 				    &old->freelist_counters,
639 				    new->freelist_counters);
640 #else
641 	return false;
642 #endif
643 }
644 
645 static inline bool
646 __update_freelist_slow(struct slab *slab, struct freelist_counters *old,
647 		       struct freelist_counters *new)
648 {
649 	bool ret = false;
650 
651 	slab_lock(slab);
652 	if (slab->freelist == old->freelist &&
653 	    slab->counters == old->counters) {
654 		slab->freelist = new->freelist;
655 		/* prevent tearing for the read in get_partial_node_bulk() */
656 		WRITE_ONCE(slab->counters, new->counters);
657 		ret = true;
658 	}
659 	slab_unlock(slab);
660 
661 	return ret;
662 }
663 
664 /*
665  * Interrupts must be disabled (for the fallback code to work right), typically
666  * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
667  * part of bit_spin_lock(), is sufficient because the policy is not to allow any
668  * allocation/free operation in hardirq context. Therefore nothing can
669  * interrupt the operation.
670  */
671 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
672 		struct freelist_counters *old, struct freelist_counters *new, const char *n)
673 {
674 	bool ret;
675 
676 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
677 		lockdep_assert_irqs_disabled();
678 
679 	if (s->flags & __CMPXCHG_DOUBLE)
680 		ret = __update_freelist_fast(slab, old, new);
681 	else
682 		ret = __update_freelist_slow(slab, old, new);
683 
684 	if (likely(ret))
685 		return true;
686 
687 	cpu_relax();
688 	stat(s, CMPXCHG_DOUBLE_FAIL);
689 
690 #ifdef SLUB_DEBUG_CMPXCHG
691 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
692 #endif
693 
694 	return false;
695 }
696 
697 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
698 		struct freelist_counters *old, struct freelist_counters *new, const char *n)
699 {
700 	bool ret;
701 
702 	if (s->flags & __CMPXCHG_DOUBLE) {
703 		ret = __update_freelist_fast(slab, old, new);
704 	} else {
705 		unsigned long flags;
706 
707 		local_irq_save(flags);
708 		ret = __update_freelist_slow(slab, old, new);
709 		local_irq_restore(flags);
710 	}
711 	if (likely(ret))
712 		return true;
713 
714 	cpu_relax();
715 	stat(s, CMPXCHG_DOUBLE_FAIL);
716 
717 #ifdef SLUB_DEBUG_CMPXCHG
718 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
719 #endif
720 
721 	return false;
722 }
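
/*
 * Callers typically drive the helpers above from a retry loop: snapshot the
 * current freelist and counters, build the desired new values, and start over
 * if another CPU updated the slab in the meantime. A rough sketch of the
 * pattern (details vary per caller):
 *
 *	struct freelist_counters old, new;
 *
 *	do {
 *		old.freelist = slab->freelist;
 *		old.counters = slab->counters;
 *		new = old;
 *		// link freed object(s) in front of old.freelist and adjust
 *		// the object counts encoded in new.counters
 *	} while (!slab_update_freelist(s, slab, &old, &new, "example"));
 */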
723 
724 /*
725  * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc()
726  * API family rounds up the requested size to one of them, so an object can
727  * be larger than what was requested. Save the original request size in the
728  * metadata area for better debugging and sanity checks.
729  */
730 static inline void set_orig_size(struct kmem_cache *s,
731 				void *object, unsigned long orig_size)
732 {
733 	void *p = kasan_reset_tag(object);
734 
735 	if (!slub_debug_orig_size(s))
736 		return;
737 
738 	p += get_info_end(s);
739 	p += sizeof(struct track) * 2;
740 
741 	*(unsigned long *)p = orig_size;
742 }
743 
744 static inline unsigned long get_orig_size(struct kmem_cache *s, void *object)
745 {
746 	void *p = kasan_reset_tag(object);
747 
748 	if (is_kfence_address(object))
749 		return kfence_ksize(object);
750 
751 	if (!slub_debug_orig_size(s))
752 		return s->object_size;
753 
754 	p += get_info_end(s);
755 	p += sizeof(struct track) * 2;
756 
757 	return *(unsigned long *)p;
758 }
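
/*
 * Example (illustrative): kmalloc(20, GFP_KERNEL) is served from the
 * kmalloc-32 cache, so s->object_size is 32 even though the caller asked for
 * 20 bytes. When slub_debug_orig_size(s) is true, set_orig_size() records 20
 * right behind the two struct track records, which later lets init_object()
 * and check_object() treat bytes 20..31 as a redzone instead of as part of
 * the requested allocation.
 */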
759 
760 #ifdef CONFIG_SLAB_OBJ_EXT
761 
762 /*
763  * Check if memory cgroup or memory allocation profiling is enabled.
764  * If either is enabled, SLUB tries to reduce the memory overhead of
765  * accounting slab objects. If neither is enabled when this function is
766  * called, the optimization is simply skipped to avoid affecting caches that
767  * do not need slabobj_ext metadata.
768  *
769  * However, this may miss the optimization when memory cgroup or memory
770  * allocation profiling is used but the cache is created so early that
771  * those subsystems are not yet initialized.
772  */
773 static inline bool need_slab_obj_exts(struct kmem_cache *s)
774 {
775 	if (s->flags & SLAB_NO_OBJ_EXT)
776 		return false;
777 
778 	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
779 		return true;
780 
781 	if (mem_alloc_profiling_enabled())
782 		return true;
783 
784 	return false;
785 }
786 
787 static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
788 {
789 	return sizeof(struct slabobj_ext) * slab->objects;
790 }
791 
792 static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
793 						    struct slab *slab)
794 {
795 	unsigned long objext_offset;
796 
797 	objext_offset = s->size * slab->objects;
798 	objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
799 	return objext_offset;
800 }
801 
802 static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
803 						     struct slab *slab)
804 {
805 	unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
806 	unsigned long objext_size = obj_exts_size_in_slab(slab);
807 
808 	return objext_offset + objext_size <= slab_size(slab);
809 }
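
/*
 * Worked example (illustrative, assuming an 8192-byte order-1 slab and
 * sizeof(struct slabobj_ext) == 8): a cache with s->size == 700 fits 11
 * objects occupying 7700 bytes. obj_exts_offset_in_slab() aligns that up to
 * 7704 and the vector needs 11 * 8 == 88 bytes, so 7704 + 88 == 7792 <= 8192
 * and the slabobj_ext vector can live in the unused tail of the slab instead
 * of requiring a separate allocation.
 */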
810 
811 static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
812 {
813 	unsigned long obj_exts;
814 	unsigned long start;
815 	unsigned long end;
816 
817 	obj_exts = slab_obj_exts(slab);
818 	if (!obj_exts)
819 		return false;
820 
821 	start = (unsigned long)slab_address(slab);
822 	end = start + slab_size(slab);
823 	return (obj_exts >= start) && (obj_exts < end);
824 }
825 #else
826 static inline bool need_slab_obj_exts(struct kmem_cache *s)
827 {
828 	return false;
829 }
830 
831 static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
832 {
833 	return 0;
834 }
835 
836 static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
837 						    struct slab *slab)
838 {
839 	return 0;
840 }
841 
842 static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
843 						     struct slab *slab)
844 {
845 	return false;
846 }
847 
848 static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
849 {
850 	return false;
851 }
852 
853 #endif
854 
855 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
856 static bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
857 {
858 	/*
859 	 * Note we cannot rely on the SLAB_OBJ_EXT_IN_OBJ flag here and need to
860 	 * check the stride. A cache can have SLAB_OBJ_EXT_IN_OBJ set, but placing
861 	 * the vector in the slab's leftover space is preferred, and whether that
862 	 * is possible depends on the particular slab's size.
863 	 */
864 	return obj_exts_in_slab(s, slab) &&
865 	       (slab_get_stride(slab) == s->size);
866 }
867 
868 static unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
869 {
870 	unsigned int offset = get_info_end(s);
871 
872 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
873 		offset += sizeof(struct track) * 2;
874 
875 	if (slub_debug_orig_size(s))
876 		offset += sizeof(unsigned long);
877 
878 	offset += kasan_metadata_size(s, false);
879 
880 	return offset;
881 }
882 #else
883 static inline bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
884 {
885 	return false;
886 }
887 
888 static inline unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
889 {
890 	return 0;
891 }
892 #endif
893 
894 #ifdef CONFIG_SLUB_DEBUG
895 
896 /*
897  * For debugging contexts where we want to check whether the struct slab
898  * pointer appears to be valid.
899  */
900 static inline bool validate_slab_ptr(struct slab *slab)
901 {
902 	return PageSlab(slab_page(slab));
903 }
904 
905 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
906 static DEFINE_SPINLOCK(object_map_lock);
907 
908 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
909 		       struct slab *slab)
910 {
911 	void *addr = slab_address(slab);
912 	void *p;
913 
914 	bitmap_zero(obj_map, slab->objects);
915 
916 	for (p = slab->freelist; p; p = get_freepointer(s, p))
917 		set_bit(__obj_to_index(s, addr, p), obj_map);
918 }
919 
920 #if IS_ENABLED(CONFIG_KUNIT)
921 static bool slab_add_kunit_errors(void)
922 {
923 	struct kunit_resource *resource;
924 
925 	if (!kunit_get_current_test())
926 		return false;
927 
928 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
929 	if (!resource)
930 		return false;
931 
932 	(*(int *)resource->data)++;
933 	kunit_put_resource(resource);
934 	return true;
935 }
936 
937 bool slab_in_kunit_test(void)
938 {
939 	struct kunit_resource *resource;
940 
941 	if (!kunit_get_current_test())
942 		return false;
943 
944 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
945 	if (!resource)
946 		return false;
947 
948 	kunit_put_resource(resource);
949 	return true;
950 }
951 #else
952 static inline bool slab_add_kunit_errors(void) { return false; }
953 #endif
954 
955 static inline unsigned int size_from_object(struct kmem_cache *s)
956 {
957 	if (s->flags & SLAB_RED_ZONE)
958 		return s->size - s->red_left_pad;
959 
960 	return s->size;
961 }
962 
963 static inline void *restore_red_left(struct kmem_cache *s, void *p)
964 {
965 	if (s->flags & SLAB_RED_ZONE)
966 		p -= s->red_left_pad;
967 
968 	return p;
969 }
970 
971 /*
972  * Debug settings:
973  */
974 #if defined(CONFIG_SLUB_DEBUG_ON)
975 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
976 #else
977 static slab_flags_t slub_debug;
978 #endif
979 
980 static const char *slub_debug_string __ro_after_init;
981 static int disable_higher_order_debug;
982 
983 /*
984  * Object debugging
985  */
986 
987 /* Verify that a pointer has an address that is valid within a slab page */
988 static inline int check_valid_pointer(struct kmem_cache *s,
989 				struct slab *slab, void *object)
990 {
991 	void *base;
992 
993 	if (!object)
994 		return 1;
995 
996 	base = slab_address(slab);
997 	object = kasan_reset_tag(object);
998 	object = restore_red_left(s, object);
999 	if (object < base || object >= base + slab->objects * s->size ||
1000 		(object - base) % s->size) {
1001 		return 0;
1002 	}
1003 
1004 	return 1;
1005 }
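
/*
 * Example (illustrative): for a slab at address base with s->size == 64,
 * slab->objects == 32 and no red zoning, the valid object addresses are
 * base + 0, base + 64, ..., base + 31 * 64. A pointer such as base + 96 lies
 * within the slab but fails the (object - base) % s->size check. With
 * SLAB_RED_ZONE the passed-in object pointer is first moved back by
 * s->red_left_pad via restore_red_left(), so the same arithmetic applies to
 * the start of each object's slot.
 */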
1006 
1007 static void print_section(char *level, char *text, u8 *addr,
1008 			  unsigned int length)
1009 {
1010 	metadata_access_enable();
1011 	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
1012 			16, 1, kasan_reset_tag((void *)addr), length, 1);
1013 	metadata_access_disable();
1014 }
1015 
1016 static struct track *get_track(struct kmem_cache *s, void *object,
1017 	enum track_item alloc)
1018 {
1019 	struct track *p;
1020 
1021 	p = object + get_info_end(s);
1022 
1023 	return kasan_reset_tag(p + alloc);
1024 }
1025 
1026 #ifdef CONFIG_STACKDEPOT
1027 static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
1028 {
1029 	depot_stack_handle_t handle;
1030 	unsigned long entries[TRACK_ADDRS_COUNT];
1031 	unsigned int nr_entries;
1032 
1033 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
1034 	handle = stack_depot_save(entries, nr_entries, gfp_flags);
1035 
1036 	return handle;
1037 }
1038 #else
1039 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
1040 {
1041 	return 0;
1042 }
1043 #endif
1044 
1045 static void set_track_update(struct kmem_cache *s, void *object,
1046 			     enum track_item alloc, unsigned long addr,
1047 			     depot_stack_handle_t handle)
1048 {
1049 	struct track *p = get_track(s, object, alloc);
1050 
1051 #ifdef CONFIG_STACKDEPOT
1052 	p->handle = handle;
1053 #endif
1054 	p->addr = addr;
1055 	p->cpu = raw_smp_processor_id();
1056 	p->pid = current->pid;
1057 	p->when = jiffies;
1058 }
1059 
1060 static __always_inline void set_track(struct kmem_cache *s, void *object,
1061 				      enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
1062 {
1063 	depot_stack_handle_t handle = set_track_prepare(gfp_flags);
1064 
1065 	set_track_update(s, object, alloc, addr, handle);
1066 }
1067 
1068 static void init_tracking(struct kmem_cache *s, void *object)
1069 {
1070 	struct track *p;
1071 
1072 	if (!(s->flags & SLAB_STORE_USER))
1073 		return;
1074 
1075 	p = get_track(s, object, TRACK_ALLOC);
1076 	memset(p, 0, 2*sizeof(struct track));
1077 }
1078 
1079 static void print_track(const char *s, struct track *t, unsigned long pr_time)
1080 {
1081 	depot_stack_handle_t handle __maybe_unused;
1082 
1083 	if (!t->addr)
1084 		return;
1085 
1086 	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
1087 	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
1088 #ifdef CONFIG_STACKDEPOT
1089 	handle = READ_ONCE(t->handle);
1090 	if (handle)
1091 		stack_depot_print(handle);
1092 	else
1093 		pr_err("object allocation/free stack trace missing\n");
1094 #endif
1095 }
1096 
1097 void print_tracking(struct kmem_cache *s, void *object)
1098 {
1099 	unsigned long pr_time = jiffies;
1100 	if (!(s->flags & SLAB_STORE_USER))
1101 		return;
1102 
1103 	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
1104 	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
1105 }
1106 
1107 static void print_slab_info(const struct slab *slab)
1108 {
1109 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
1110 	       slab, slab->objects, slab->inuse, slab->freelist,
1111 	       &slab->flags.f);
1112 }
1113 
1114 void skip_orig_size_check(struct kmem_cache *s, const void *object)
1115 {
1116 	set_orig_size(s, (void *)object, s->object_size);
1117 }
1118 
1119 static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
1120 {
1121 	struct va_format vaf;
1122 	va_list args;
1123 
1124 	va_copy(args, argsp);
1125 	vaf.fmt = fmt;
1126 	vaf.va = &args;
1127 	pr_err("=============================================================================\n");
1128 	pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
1129 	pr_err("-----------------------------------------------------------------------------\n\n");
1130 	va_end(args);
1131 }
1132 
1133 static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
1134 {
1135 	va_list args;
1136 
1137 	va_start(args, fmt);
1138 	__slab_bug(s, fmt, args);
1139 	va_end(args);
1140 }
1141 
1142 __printf(2, 3)
1143 static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
1144 {
1145 	struct va_format vaf;
1146 	va_list args;
1147 
1148 	if (slab_add_kunit_errors())
1149 		return;
1150 
1151 	va_start(args, fmt);
1152 	vaf.fmt = fmt;
1153 	vaf.va = &args;
1154 	pr_err("FIX %s: %pV\n", s->name, &vaf);
1155 	va_end(args);
1156 }
1157 
1158 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
1159 {
1160 	unsigned int off;	/* Offset of last byte */
1161 	u8 *addr = slab_address(slab);
1162 
1163 	print_tracking(s, p);
1164 
1165 	print_slab_info(slab);
1166 
1167 	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
1168 	       p, p - addr, get_freepointer(s, p));
1169 
1170 	if (s->flags & SLAB_RED_ZONE)
1171 		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
1172 			      s->red_left_pad);
1173 	else if (p > addr + 16)
1174 		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
1175 
1176 	print_section(KERN_ERR,         "Object   ", p,
1177 		      min_t(unsigned int, s->object_size, PAGE_SIZE));
1178 	if (s->flags & SLAB_RED_ZONE)
1179 		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
1180 			s->inuse - s->object_size);
1181 
1182 	off = get_info_end(s);
1183 
1184 	if (s->flags & SLAB_STORE_USER)
1185 		off += 2 * sizeof(struct track);
1186 
1187 	if (slub_debug_orig_size(s))
1188 		off += sizeof(unsigned long);
1189 
1190 	off += kasan_metadata_size(s, false);
1191 
1192 	if (obj_exts_in_object(s, slab))
1193 		off += sizeof(struct slabobj_ext);
1194 
1195 	if (off != size_from_object(s))
1196 		/* Beginning of the filler is the free pointer */
1197 		print_section(KERN_ERR, "Padding  ", p + off,
1198 			      size_from_object(s) - off);
1199 }
1200 
1201 static void object_err(struct kmem_cache *s, struct slab *slab,
1202 			u8 *object, const char *reason)
1203 {
1204 	if (slab_add_kunit_errors())
1205 		return;
1206 
1207 	slab_bug(s, reason);
1208 	if (!object || !check_valid_pointer(s, slab, object)) {
1209 		print_slab_info(slab);
1210 		pr_err("Invalid pointer 0x%p\n", object);
1211 	} else {
1212 		print_trailer(s, slab, object);
1213 	}
1214 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1215 
1216 	WARN_ON(1);
1217 }
1218 
1219 static void __slab_err(struct slab *slab)
1220 {
1221 	if (slab_in_kunit_test())
1222 		return;
1223 
1224 	print_slab_info(slab);
1225 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1226 
1227 	WARN_ON(1);
1228 }
1229 
1230 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
1231 			const char *fmt, ...)
1232 {
1233 	va_list args;
1234 
1235 	if (slab_add_kunit_errors())
1236 		return;
1237 
1238 	va_start(args, fmt);
1239 	__slab_bug(s, fmt, args);
1240 	va_end(args);
1241 
1242 	__slab_err(slab);
1243 }
1244 
1245 static void init_object(struct kmem_cache *s, void *object, u8 val)
1246 {
1247 	u8 *p = kasan_reset_tag(object);
1248 	unsigned int poison_size = s->object_size;
1249 
1250 	if (s->flags & SLAB_RED_ZONE) {
1251 		/*
1252 		 * Here and below, avoid overwriting the KMSAN shadow. Keeping
1253 		 * the shadow makes it possible to distinguish uninit-value
1254 		 * from use-after-free.
1255 		 */
1256 		memset_no_sanitize_memory(p - s->red_left_pad, val,
1257 					  s->red_left_pad);
1258 
1259 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1260 			/*
1261 			 * Redzone the extra space kmalloc allocated beyond the
1262 			 * requested size, and limit the poison size to the
1263 			 * original request size accordingly.
1264 			 */
1265 			poison_size = get_orig_size(s, object);
1266 		}
1267 	}
1268 
1269 	if (s->flags & __OBJECT_POISON) {
1270 		memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
1271 		memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
1272 	}
1273 
1274 	if (s->flags & SLAB_RED_ZONE)
1275 		memset_no_sanitize_memory(p + poison_size, val,
1276 					  s->inuse - poison_size);
1277 }
1278 
1279 static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
1280 						void *from, void *to)
1281 {
1282 	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1283 	memset(from, data, to - from);
1284 }
1285 
1286 #ifdef CONFIG_KMSAN
1287 #define pad_check_attributes noinline __no_kmsan_checks
1288 #else
1289 #define pad_check_attributes
1290 #endif
1291 
1292 static pad_check_attributes int
1293 check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
1294 		       u8 *object, const char *what, u8 *start, unsigned int value,
1295 		       unsigned int bytes, bool slab_obj_print)
1296 {
1297 	u8 *fault;
1298 	u8 *end;
1299 	u8 *addr = slab_address(slab);
1300 
1301 	metadata_access_enable();
1302 	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
1303 	metadata_access_disable();
1304 	if (!fault)
1305 		return 1;
1306 
1307 	end = start + bytes;
1308 	while (end > fault && end[-1] == value)
1309 		end--;
1310 
1311 	if (slab_add_kunit_errors())
1312 		goto skip_bug_print;
1313 
1314 	pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1315 	       what, fault, end - 1, fault - addr, fault[0], value);
1316 
1317 	if (slab_obj_print)
1318 		object_err(s, slab, object, "Object corrupt");
1319 
1320 skip_bug_print:
1321 	restore_bytes(s, what, value, fault, end);
1322 	return 0;
1323 }
1324 
1325 /*
1326  * Object field layout:
1327  *
1328  * [Left redzone padding] (if SLAB_RED_ZONE)
1329  *   - Field size: s->red_left_pad
1330  *   - Immediately precedes each object when SLAB_RED_ZONE is set.
1331  *   - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
1332  *     0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
1333  *
1334  * [Object bytes] (object address starts here)
1335  *   - Field size: s->object_size
1336  *   - Object payload bytes.
1337  *   - If the freepointer may overlap the object, it is stored inside
1338  *     the object (typically near the middle).
1339  *   - Poisoning uses 0x6b (POISON_FREE) and the last byte is
1340  *     0xa5 (POISON_END) when __OBJECT_POISON is enabled.
1341  *
1342  * [Word-align padding] (right redzone when SLAB_RED_ZONE is set)
1343  *   - Field size: s->inuse - s->object_size
1344  *   - If redzoning is enabled and ALIGN(size, sizeof(void *)) adds no
1345  *     padding, explicitly extend by one word so the right redzone is
1346  *     non-empty.
1347  *   - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
1348  *     0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
1349  *
1350  * [Metadata starts at object + s->inuse]
1351  *   - A. freelist pointer (if freeptr_outside_object)
1352  *   - B. alloc tracking (SLAB_STORE_USER)
1353  *   - C. free tracking (SLAB_STORE_USER)
1354  *   - D. original request size (SLAB_KMALLOC && SLAB_STORE_USER)
1355  *   - E. KASAN metadata (if enabled)
1356  *
1357  * [Mandatory padding] (if CONFIG_SLUB_DEBUG && SLAB_RED_ZONE)
1358  *   - One mandatory debug word to guarantee a minimum poisoned gap
1359  *     between metadata and the next object, independent of alignment.
1360  *   - Filled with 0x5a (POISON_INUSE) when SLAB_POISON is set.
1361  * [Final alignment padding]
1362  *   - Bytes added by ALIGN(size, s->align) to reach s->size.
1363  *   - When the padding is large enough, it can be used to store
1364  *     struct slabobj_ext for accounting metadata (obj_exts_in_object()).
1365  *   - The remaining bytes (if any) are filled with 0x5a (POISON_INUSE)
1366  *     when SLAB_POISON is set.
1367  *
1368  * Notes:
1369  * - Redzones are filled by init_object() with SLUB_RED_ACTIVE/INACTIVE.
1370  * - Object contents are poisoned with POISON_FREE/END when __OBJECT_POISON.
1371  * - The trailing padding is pre-filled with POISON_INUSE by
1372  *   setup_slab_debug() when SLAB_POISON is set, and is validated by
1373  *   check_pad_bytes().
1374  * - The first object pointer is slab_address(slab) +
1375  *   (s->red_left_pad if redzoning); subsequent objects are reached by
1376  *   adding s->size each time.
1377  *
1378  * If a slab cache flag relies on specific metadata to exist at a fixed
1379  * offset, the flag must be included in SLAB_NEVER_MERGE to prevent merging.
1380  * Otherwise, the cache would misbehave as s->object_size and s->inuse are
1381  * adjusted during cache merging (see __kmem_cache_alias()).
1382  */
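
/*
 * Putting the layout above together for one object at address P in a cache
 * with SLAB_RED_ZONE and SLAB_STORE_USER (offsets are symbolic; the actual
 * values are computed by calculate_sizes()):
 *
 *	P - s->red_left_pad	left redzone
 *	P			object payload (s->object_size bytes)
 *	P + s->object_size	right redzone, up to P + s->inuse
 *	P + s->inuse		free pointer (when outside the object)
 *	P + get_info_end(s)	2 * struct track, then the original request
 *				size (if slub_debug_orig_size()), then KASAN
 *				metadata
 *	...			poisoned padding up to
 *				P + s->size - s->red_left_pad
 *	P + s->size		the next object
 */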
1383 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
1384 {
1385 	unsigned long off = get_info_end(s);	/* The end of info */
1386 
1387 	if (s->flags & SLAB_STORE_USER) {
1388 		/* We also have user information there */
1389 		off += 2 * sizeof(struct track);
1390 
1391 		if (s->flags & SLAB_KMALLOC)
1392 			off += sizeof(unsigned long);
1393 	}
1394 
1395 	off += kasan_metadata_size(s, false);
1396 
1397 	if (obj_exts_in_object(s, slab))
1398 		off += sizeof(struct slabobj_ext);
1399 
1400 	if (size_from_object(s) == off)
1401 		return 1;
1402 
1403 	return check_bytes_and_report(s, slab, p, "Object padding",
1404 			p + off, POISON_INUSE, size_from_object(s) - off, true);
1405 }
1406 
1407 /* Check the pad bytes at the end of a slab page */
1408 static pad_check_attributes void
1409 slab_pad_check(struct kmem_cache *s, struct slab *slab)
1410 {
1411 	u8 *start;
1412 	u8 *fault;
1413 	u8 *end;
1414 	u8 *pad;
1415 	int length;
1416 	int remainder;
1417 
1418 	if (!(s->flags & SLAB_POISON))
1419 		return;
1420 
1421 	start = slab_address(slab);
1422 	length = slab_size(slab);
1423 	end = start + length;
1424 
1425 	if (obj_exts_in_slab(s, slab) && !obj_exts_in_object(s, slab)) {
1426 		remainder = length;
1427 		remainder -= obj_exts_offset_in_slab(s, slab);
1428 		remainder -= obj_exts_size_in_slab(slab);
1429 	} else {
1430 		remainder = length % s->size;
1431 	}
1432 
1433 	if (!remainder)
1434 		return;
1435 
1436 	pad = end - remainder;
1437 	metadata_access_enable();
1438 	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1439 	metadata_access_disable();
1440 	if (!fault)
1441 		return;
1442 	while (end > fault && end[-1] == POISON_INUSE)
1443 		end--;
1444 
1445 	slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1446 		 fault, end - 1, fault - start);
1447 	print_section(KERN_ERR, "Padding ", pad, remainder);
1448 	__slab_err(slab);
1449 
1450 	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
1451 }
1452 
1453 static int check_object(struct kmem_cache *s, struct slab *slab,
1454 					void *object, u8 val)
1455 {
1456 	u8 *p = object;
1457 	u8 *endobject = object + s->object_size;
1458 	unsigned int orig_size, kasan_meta_size;
1459 	int ret = 1;
1460 
1461 	if (s->flags & SLAB_RED_ZONE) {
1462 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1463 			object - s->red_left_pad, val, s->red_left_pad, ret))
1464 			ret = 0;
1465 
1466 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1467 			endobject, val, s->inuse - s->object_size, ret))
1468 			ret = 0;
1469 
1470 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1471 			orig_size = get_orig_size(s, object);
1472 
1473 			if (s->object_size > orig_size  &&
1474 				!check_bytes_and_report(s, slab, object,
1475 					"kmalloc Redzone", p + orig_size,
1476 					val, s->object_size - orig_size, ret)) {
1477 				ret = 0;
1478 			}
1479 		}
1480 	} else {
1481 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1482 			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
1483 				endobject, POISON_INUSE,
1484 				s->inuse - s->object_size, ret))
1485 				ret = 0;
1486 		}
1487 	}
1488 
1489 	if (s->flags & SLAB_POISON) {
1490 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1491 			/*
1492 			 * KASAN can save its free metadata inside the object
1493 			 * at offset 0. Thus, skip checking the part of the
1494 			 * redzone that overlaps with the metadata.
1495 			 */
1496 			kasan_meta_size = kasan_metadata_size(s, true);
1497 			if (kasan_meta_size < s->object_size - 1 &&
1498 			    !check_bytes_and_report(s, slab, p, "Poison",
1499 					p + kasan_meta_size, POISON_FREE,
1500 					s->object_size - kasan_meta_size - 1, ret))
1501 				ret = 0;
1502 			if (kasan_meta_size < s->object_size &&
1503 			    !check_bytes_and_report(s, slab, p, "End Poison",
1504 					p + s->object_size - 1, POISON_END, 1, ret))
1505 				ret = 0;
1506 		}
1507 		/*
1508 		 * check_pad_bytes cleans up on its own.
1509 		 */
1510 		if (!check_pad_bytes(s, slab, p))
1511 			ret = 0;
1512 	}
1513 
1514 	/*
1515 	 * Cannot check freepointer while object is allocated if
1516 	 * object and freepointer overlap.
1517 	 */
1518 	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
1519 	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
1520 		object_err(s, slab, p, "Freepointer corrupt");
1521 		/*
1522 		 * No choice but to zap it and thus lose the remainder
1523 		 * of the free objects in this slab. May cause
1524 		 * another error because the object count is now wrong.
1525 		 */
1526 		set_freepointer(s, p, NULL);
1527 		ret = 0;
1528 	}
1529 
1530 	return ret;
1531 }
1532 
1533 /*
1534  * Checks if the slab state looks sane. Assumes the struct slab pointer
1535  * was either obtained in a way that ensures it's valid, or validated
1536  * by validate_slab_ptr()
1537  */
1538 static int check_slab(struct kmem_cache *s, struct slab *slab)
1539 {
1540 	int maxobj;
1541 
1542 	maxobj = order_objects(slab_order(slab), s->size);
1543 	if (slab->objects > maxobj) {
1544 		slab_err(s, slab, "objects %u > max %u",
1545 			slab->objects, maxobj);
1546 		return 0;
1547 	}
1548 	if (slab->inuse > slab->objects) {
1549 		slab_err(s, slab, "inuse %u > max %u",
1550 			slab->inuse, slab->objects);
1551 		return 0;
1552 	}
1553 	if (slab->frozen) {
1554 		slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
1555 		return 0;
1556 	}
1557 
1558 	/* Slab_pad_check fixes things up after itself */
1559 	slab_pad_check(s, slab);
1560 	return 1;
1561 }
1562 
1563 /*
1564  * Determine if a certain object in a slab is on the freelist. Must hold the
1565  * slab lock to guarantee that the chains are in a consistent state.
1566  */
1567 static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
1568 {
1569 	int nr = 0;
1570 	void *fp;
1571 	void *object = NULL;
1572 	int max_objects;
1573 
1574 	fp = slab->freelist;
1575 	while (fp && nr <= slab->objects) {
1576 		if (fp == search)
1577 			return true;
1578 		if (!check_valid_pointer(s, slab, fp)) {
1579 			if (object) {
1580 				object_err(s, slab, object,
1581 					"Freechain corrupt");
1582 				set_freepointer(s, object, NULL);
1583 				break;
1584 			} else {
1585 				slab_err(s, slab, "Freepointer corrupt");
1586 				slab->freelist = NULL;
1587 				slab->inuse = slab->objects;
1588 				slab_fix(s, "Freelist cleared");
1589 				return false;
1590 			}
1591 		}
1592 		object = fp;
1593 		fp = get_freepointer(s, object);
1594 		nr++;
1595 	}
1596 
1597 	if (nr > slab->objects) {
1598 		slab_err(s, slab, "Freelist cycle detected");
1599 		slab->freelist = NULL;
1600 		slab->inuse = slab->objects;
1601 		slab_fix(s, "Freelist cleared");
1602 		return false;
1603 	}
1604 
1605 	max_objects = order_objects(slab_order(slab), s->size);
1606 	if (max_objects > MAX_OBJS_PER_PAGE)
1607 		max_objects = MAX_OBJS_PER_PAGE;
1608 
1609 	if (slab->objects != max_objects) {
1610 		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1611 			 slab->objects, max_objects);
1612 		slab->objects = max_objects;
1613 		slab_fix(s, "Number of objects adjusted");
1614 	}
1615 	if (slab->inuse != slab->objects - nr) {
1616 		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
1617 			 slab->inuse, slab->objects - nr);
1618 		slab->inuse = slab->objects - nr;
1619 		slab_fix(s, "Object count adjusted");
1620 	}
1621 	return search == NULL;
1622 }
1623 
1624 static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1625 								int alloc)
1626 {
1627 	if (s->flags & SLAB_TRACE) {
1628 		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1629 			s->name,
1630 			alloc ? "alloc" : "free",
1631 			object, slab->inuse,
1632 			slab->freelist);
1633 
1634 		if (!alloc)
1635 			print_section(KERN_INFO, "Object ", (void *)object,
1636 					s->object_size);
1637 
1638 		dump_stack();
1639 	}
1640 }
1641 
1642 /*
1643  * Tracking of fully allocated slabs for debugging purposes.
1644  */
1645 static void add_full(struct kmem_cache *s,
1646 	struct kmem_cache_node *n, struct slab *slab)
1647 {
1648 	if (!(s->flags & SLAB_STORE_USER))
1649 		return;
1650 
1651 	lockdep_assert_held(&n->list_lock);
1652 	list_add(&slab->slab_list, &n->full);
1653 }
1654 
1655 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
1656 {
1657 	if (!(s->flags & SLAB_STORE_USER))
1658 		return;
1659 
1660 	lockdep_assert_held(&n->list_lock);
1661 	list_del(&slab->slab_list);
1662 }
1663 
1664 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1665 {
1666 	return atomic_long_read(&n->nr_slabs);
1667 }
1668 
1669 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1670 {
1671 	struct kmem_cache_node *n = get_node(s, node);
1672 
1673 	atomic_long_inc(&n->nr_slabs);
1674 	atomic_long_add(objects, &n->total_objects);
1675 }
1676 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1677 {
1678 	struct kmem_cache_node *n = get_node(s, node);
1679 
1680 	atomic_long_dec(&n->nr_slabs);
1681 	atomic_long_sub(objects, &n->total_objects);
1682 }
1683 
1684 /* Object debug checks for alloc/free paths */
1685 static void setup_object_debug(struct kmem_cache *s, void *object)
1686 {
1687 	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1688 		return;
1689 
1690 	init_object(s, object, SLUB_RED_INACTIVE);
1691 	init_tracking(s, object);
1692 }
1693 
1694 static
1695 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
1696 {
1697 	if (!kmem_cache_debug_flags(s, SLAB_POISON))
1698 		return;
1699 
1700 	metadata_access_enable();
1701 	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
1702 	metadata_access_disable();
1703 }
1704 
1705 static inline int alloc_consistency_checks(struct kmem_cache *s,
1706 					struct slab *slab, void *object)
1707 {
1708 	if (!check_slab(s, slab))
1709 		return 0;
1710 
1711 	if (!check_valid_pointer(s, slab, object)) {
1712 		object_err(s, slab, object, "Freelist Pointer check fails");
1713 		return 0;
1714 	}
1715 
1716 	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1717 		return 0;
1718 
1719 	return 1;
1720 }
1721 
1722 static noinline bool alloc_debug_processing(struct kmem_cache *s,
1723 			struct slab *slab, void *object, int orig_size)
1724 {
1725 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1726 		if (!alloc_consistency_checks(s, slab, object))
1727 			goto bad;
1728 	}
1729 
1730 	/* Success. Perform special debug activities for allocs */
1731 	trace(s, slab, object, 1);
1732 	set_orig_size(s, object, orig_size);
1733 	init_object(s, object, SLUB_RED_ACTIVE);
1734 	return true;
1735 
1736 bad:
1737 	/*
1738 	 * Let's do the best we can to avoid issues in the future. Marking all
1739 	 * objects as used avoids touching the remaining objects.
1740 	 */
1741 	slab_fix(s, "Marking all objects used");
1742 	slab->inuse = slab->objects;
1743 	slab->freelist = NULL;
1744 	slab->frozen = 1; /* mark consistency-failed slab as frozen */
1745 
1746 	return false;
1747 }
1748 
1749 static inline int free_consistency_checks(struct kmem_cache *s,
1750 		struct slab *slab, void *object, unsigned long addr)
1751 {
1752 	if (!check_valid_pointer(s, slab, object)) {
1753 		slab_err(s, slab, "Invalid object pointer 0x%p", object);
1754 		return 0;
1755 	}
1756 
1757 	if (on_freelist(s, slab, object)) {
1758 		object_err(s, slab, object, "Object already free");
1759 		return 0;
1760 	}
1761 
1762 	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1763 		return 0;
1764 
1765 	if (unlikely(s != slab->slab_cache)) {
1766 		if (!slab->slab_cache) {
1767 			slab_err(NULL, slab, "No slab cache for object 0x%p",
1768 				 object);
1769 		} else {
1770 			object_err(s, slab, object,
1771 				   "page slab pointer corrupt.");
1772 		}
1773 		return 0;
1774 	}
1775 	return 1;
1776 }
1777 
1778 /*
1779  * Parse a block of slab_debug options. Blocks are delimited by ';'
1780  *
1781  * @str:    start of block
1782  * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1783  * @slabs:  return start of list of slabs, or NULL when there's no list
1784  * @init:   assume this is initial parsing and not per-kmem-create parsing
1785  *
1786  * returns the start of next block if there's any, or NULL
1787  */
1788 static const char *
1789 parse_slub_debug_flags(const char *str, slab_flags_t *flags, const char **slabs, bool init)
1790 {
1791 	bool higher_order_disable = false;
1792 
1793 	/* Skip any completely empty blocks */
1794 	while (*str && *str == ';')
1795 		str++;
1796 
1797 	if (*str == ',') {
1798 		/*
1799 		 * No options but restriction on slabs. This means full
1800 		 * debugging for slabs matching a pattern.
1801 		 */
1802 		*flags = DEBUG_DEFAULT_FLAGS;
1803 		goto check_slabs;
1804 	}
1805 	*flags = 0;
1806 
1807 	/* Determine which debug features should be switched on */
1808 	for (; *str && *str != ',' && *str != ';'; str++) {
1809 		switch (tolower(*str)) {
1810 		case '-':
1811 			*flags = 0;
1812 			break;
1813 		case 'f':
1814 			*flags |= SLAB_CONSISTENCY_CHECKS;
1815 			break;
1816 		case 'z':
1817 			*flags |= SLAB_RED_ZONE;
1818 			break;
1819 		case 'p':
1820 			*flags |= SLAB_POISON;
1821 			break;
1822 		case 'u':
1823 			*flags |= SLAB_STORE_USER;
1824 			break;
1825 		case 't':
1826 			*flags |= SLAB_TRACE;
1827 			break;
1828 		case 'a':
1829 			*flags |= SLAB_FAILSLAB;
1830 			break;
1831 		case 'o':
1832 			/*
1833 			 * Avoid enabling debugging on caches if their minimum
1834 			 * order would increase as a result.
1835 			 */
1836 			higher_order_disable = true;
1837 			break;
1838 		default:
1839 			if (init)
1840 				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
1841 		}
1842 	}
1843 check_slabs:
1844 	if (*str == ',')
1845 		*slabs = ++str;
1846 	else
1847 		*slabs = NULL;
1848 
1849 	/* Skip over the slab list */
1850 	while (*str && *str != ';')
1851 		str++;
1852 
1853 	/* Skip any completely empty blocks */
1854 	while (*str && *str == ';')
1855 		str++;
1856 
1857 	if (init && higher_order_disable)
1858 		disable_higher_order_debug = 1;
1859 
1860 	if (*str)
1861 		return str;
1862 	else
1863 		return NULL;
1864 }
1865 
1866 static int __init setup_slub_debug(const char *str, const struct kernel_param *kp)
1867 {
1868 	slab_flags_t flags;
1869 	slab_flags_t global_flags;
1870 	const char *saved_str;
1871 	const char *slab_list;
1872 	bool global_slub_debug_changed = false;
1873 	bool slab_list_specified = false;
1874 
1875 	global_flags = DEBUG_DEFAULT_FLAGS;
1876 	if (!str || !*str)
1877 		/*
1878 		 * No options specified. Switch on full debugging.
1879 		 */
1880 		goto out;
1881 
1882 	saved_str = str;
1883 	while (str) {
1884 		str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1885 
1886 		if (!slab_list) {
1887 			global_flags = flags;
1888 			global_slub_debug_changed = true;
1889 		} else {
1890 			slab_list_specified = true;
1891 			if (flags & SLAB_STORE_USER)
1892 				stack_depot_request_early_init();
1893 		}
1894 	}
1895 
1896 	/*
1897 	 * For backwards compatibility, a single list of flags with a list of
1898 	 * slabs means debugging is only changed for those slabs, so the global
1899 	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1900 	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
1901 	 * long as there is no option specifying flags without a slab list.
1902 	 */
1903 	if (slab_list_specified) {
1904 		if (!global_slub_debug_changed)
1905 			global_flags = slub_debug;
1906 		slub_debug_string = saved_str;
1907 	}
1908 out:
1909 	slub_debug = global_flags;
1910 	if (slub_debug & SLAB_STORE_USER)
1911 		stack_depot_request_early_init();
1912 	if (slub_debug != 0 || slub_debug_string)
1913 		static_branch_enable(&slub_debug_enabled);
1914 	else
1915 		static_branch_disable(&slub_debug_enabled);
1916 	if ((static_branch_unlikely(&init_on_alloc) ||
1917 	     static_branch_unlikely(&init_on_free)) &&
1918 	    (slub_debug & SLAB_POISON))
1919 		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1920 	return 0;
1921 }
1922 
1923 static const struct kernel_param_ops param_ops_slab_debug __initconst = {
1924 	.flags = KERNEL_PARAM_OPS_FL_NOARG,
1925 	.set = setup_slub_debug,
1926 };
1927 __core_param_cb(slab_debug, &param_ops_slab_debug, NULL, 0);
1928 __core_param_cb(slub_debug, &param_ops_slab_debug, NULL, 0);
1929 
1930 /*
1931  * kmem_cache_flags - apply debugging options to the cache
1932  * @flags:		flags to set
1933  * @name:		name of the cache
1934  *
1935  * Debug option(s) are applied to @flags. In addition to the debug
1936  * option(s), if a slab name (or multiple) is specified, i.e.
1937  * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1938  * then only the selected slabs will receive the debug option(s).
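 *
 * A slab name may end in a '*' wildcard; e.g. "slab_debug=Z,kmalloc-*" would
 * apply red zoning to every cache whose name starts with "kmalloc-".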
1939  */
1940 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1941 {
1942 	const char *iter;
1943 	size_t len;
1944 	const char *next_block;
1945 	slab_flags_t block_flags;
1946 	slab_flags_t slub_debug_local = slub_debug;
1947 
1948 	if (flags & SLAB_NO_USER_FLAGS)
1949 		return flags;
1950 
1951 	/*
1952 	 * If the slab cache is for debugging (e.g. kmemleak) then
1953 	 * don't store user (stack trace) information by default,
1954 	 * but let the user enable it via the command line below.
1955 	 */
1956 	if (flags & SLAB_NOLEAKTRACE)
1957 		slub_debug_local &= ~SLAB_STORE_USER;
1958 
1959 	len = strlen(name);
1960 	next_block = slub_debug_string;
1961 	/* Go through all blocks of debug options, see if any matches our slab's name */
1962 	while (next_block) {
1963 		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1964 		if (!iter)
1965 			continue;
1966 		/* Found a block that has a slab list, search it */
1967 		while (*iter) {
1968 			const char *end, *glob;
1969 			size_t cmplen;
1970 
1971 			end = strchrnul(iter, ',');
1972 			if (next_block && next_block < end)
1973 				end = next_block - 1;
1974 
1975 			glob = strnchr(iter, end - iter, '*');
1976 			if (glob)
1977 				cmplen = glob - iter;
1978 			else
1979 				cmplen = max_t(size_t, len, (end - iter));
1980 
1981 			if (!strncmp(name, iter, cmplen)) {
1982 				flags |= block_flags;
1983 				return flags;
1984 			}
1985 
1986 			if (!*end || *end == ';')
1987 				break;
1988 			iter = end + 1;
1989 		}
1990 	}
1991 
1992 	return flags | slub_debug_local;
1993 }
1994 #else /* !CONFIG_SLUB_DEBUG */
1995 static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
1996 static inline
1997 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
1998 
1999 static inline bool alloc_debug_processing(struct kmem_cache *s,
2000 	struct slab *slab, void *object, int orig_size) { return true; }
2001 
2002 static inline bool free_debug_processing(struct kmem_cache *s,
2003 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
2004 	unsigned long addr, depot_stack_handle_t handle) { return true; }
2005 
2006 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
2007 static inline int check_object(struct kmem_cache *s, struct slab *slab,
2008 			void *object, u8 val) { return 1; }
2009 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
2010 static inline void set_track(struct kmem_cache *s, void *object,
2011 			     enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
2012 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
2013 					struct slab *slab) {}
2014 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
2015 					struct slab *slab) {}
2016 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
2017 {
2018 	return flags;
2019 }
2020 #define slub_debug 0
2021 
2022 #define disable_higher_order_debug 0
2023 
2024 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
2025 							{ return 0; }
2026 static inline void inc_slabs_node(struct kmem_cache *s, int node,
2027 							int objects) {}
2028 static inline void dec_slabs_node(struct kmem_cache *s, int node,
2029 							int objects) {}
2030 #endif /* CONFIG_SLUB_DEBUG */
2031 
2032 /*
2033  * The allocated objcg pointers array is not accounted directly.
2034  * Moreover, it should not come from a DMA buffer and is not readily
2035  * reclaimable. So those GFP bits should be masked off.
2036  */
2037 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
2038 				__GFP_ACCOUNT | __GFP_NOFAIL)
2039 
2040 #ifdef CONFIG_SLAB_OBJ_EXT
2041 
2042 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
2043 
2044 static inline void mark_obj_codetag_empty(const void *obj)
2045 {
2046 	struct slab *obj_slab;
2047 	unsigned long slab_exts;
2048 
2049 	obj_slab = virt_to_slab(obj);
2050 	slab_exts = slab_obj_exts(obj_slab);
2051 	if (slab_exts) {
2052 		get_slab_obj_exts(slab_exts);
2053 		unsigned int offs = obj_to_index(obj_slab->slab_cache,
2054 						 obj_slab, obj);
2055 		struct slabobj_ext *ext = slab_obj_ext(obj_slab,
2056 						       slab_exts, offs);
2057 
2058 		if (unlikely(is_codetag_empty(&ext->ref))) {
2059 			put_slab_obj_exts(slab_exts);
2060 			return;
2061 		}
2062 
2063 		/* codetag should be NULL here */
2064 		WARN_ON(ext->ref.ct);
2065 		set_codetag_empty(&ext->ref);
2066 		put_slab_obj_exts(slab_exts);
2067 	}
2068 }
2069 
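/*
 * Mark the slab as having failed its obj_exts vector allocation. Returns
 * true if the marker was installed, false if slab->obj_exts was already
 * non-zero (e.g. a racing allocation succeeded).
 */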
2070 static inline bool mark_failed_objexts_alloc(struct slab *slab)
2071 {
2072 	return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
2073 }
2074 
2075 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2076 			struct slabobj_ext *vec, unsigned int objects)
2077 {
2078 	/*
2079 	 * If vector previously failed to allocate then we have live
2080 	 * objects with no tag reference. Mark all references in this
2081 	 * vector as empty to avoid warnings later on.
2082 	 */
2083 	if (obj_exts == OBJEXTS_ALLOC_FAIL) {
2084 		unsigned int i;
2085 
2086 		for (i = 0; i < objects; i++)
2087 			set_codetag_empty(&vec[i].ref);
2088 	}
2089 }
2090 
2091 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2092 
2093 static inline void mark_obj_codetag_empty(const void *obj) {}
2094 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
2095 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2096 			struct slabobj_ext *vec, unsigned int objects) {}
2097 
2098 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2099 
2100 static inline void init_slab_obj_exts(struct slab *slab)
2101 {
2102 	slab->obj_exts = 0;
2103 }
2104 
2105 /*
2106  * Calculate the allocation size for slabobj_ext array.
2107  *
2108  * When memory allocation profiling is enabled, the obj_exts array
2109  * could be allocated from the same slab cache it's being allocated for.
2110  * This would prevent the slab from ever being freed because it would
2111  * always contain at least one allocated object (its own obj_exts array).
2112  *
2113  * To avoid this, increase the allocation size when we detect the array
2114  * may come from the same cache, forcing it to use a different cache.
2115  */
2116 static inline size_t obj_exts_alloc_size(struct kmem_cache *s,
2117 					 struct slab *slab, gfp_t gfp)
2118 {
2119 	size_t sz = sizeof(struct slabobj_ext) * slab->objects;
2120 	struct kmem_cache *obj_exts_cache;
2121 
2122 	if (sz > KMALLOC_MAX_CACHE_SIZE)
2123 		return sz;
2124 
2125 	if (!is_kmalloc_normal(s))
2126 		return sz;
2127 
2128 	obj_exts_cache = kmalloc_slab(sz, NULL, gfp, 0);
2129 	/*
2130 	 * We can't simply compare s with obj_exts_cache, because random kmalloc
2131 	 * caches have multiple caches per size, selected by caller address.
2132 	 * Since caller address may differ between kmalloc_slab() and actual
2133 	 * allocation, bump size when sizes are equal.
2134 	 */
2135 	if (s->object_size == obj_exts_cache->object_size)
2136 		return obj_exts_cache->object_size + 1;
2137 
2138 	return sz;
2139 }
2140 
2141 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2142 		        gfp_t gfp, bool new_slab)
2143 {
2144 	bool allow_spin = gfpflags_allow_spinning(gfp);
2145 	unsigned int objects = objs_per_slab(s, slab);
2146 	unsigned long new_exts;
2147 	unsigned long old_exts;
2148 	struct slabobj_ext *vec;
2149 	size_t sz;
2150 
2151 	gfp &= ~OBJCGS_CLEAR_MASK;
2152 	/* Prevent recursive extension vector allocation */
2153 	gfp |= __GFP_NO_OBJ_EXT;
2154 
2155 	sz = obj_exts_alloc_size(s, slab, gfp);
2156 
2157 	/*
2158 	 * Note that allow_spin may be false during early boot because of the
2159 	 * restricted GFP_BOOT_MASK. Due to kmalloc_nolock() only supporting
2160 	 * architectures with cmpxchg16b, early obj_exts will be missing for
2161 	 * very early allocations on architectures without it.
2162 	 */
2163 	if (unlikely(!allow_spin))
2164 		vec = kmalloc_nolock(sz, __GFP_ZERO | __GFP_NO_OBJ_EXT,
2165 				     slab_nid(slab));
2166 	else
2167 		vec = kmalloc_node(sz, gfp | __GFP_ZERO, slab_nid(slab));
2168 
2169 	if (!vec) {
2170 		/*
2171 		 * Try to mark the slab as having a failed vector allocation.
2172 		 * If this fails, a racing process may have already completed
2173 		 * the allocation successfully.
2174 		 */
2175 		if (!mark_failed_objexts_alloc(slab) &&
2176 		    slab_obj_exts(slab))
2177 			return 0;
2178 
2179 		return -ENOMEM;
2180 	}
2181 
2182 	VM_WARN_ON_ONCE(virt_to_slab(vec) != NULL &&
2183 			virt_to_slab(vec)->slab_cache == s);
2184 
2185 	new_exts = (unsigned long)vec;
2186 #ifdef CONFIG_MEMCG
2187 	new_exts |= MEMCG_DATA_OBJEXTS;
2188 #endif
2189 retry:
2190 	old_exts = READ_ONCE(slab->obj_exts);
2191 	handle_failed_objexts_alloc(old_exts, vec, objects);
2192 
2193 	if (new_slab) {
2194 		/*
2195 		 * If the slab is brand new and nobody can yet access its
2196 		 * obj_exts, no synchronization is required and obj_exts can
2197 		 * be simply assigned.
2198 		 */
2199 		slab->obj_exts = new_exts;
2200 	} else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
2201 		/*
2202 		 * If the slab is already in use, somebody can allocate and
2203 		 * assign slabobj_exts in parallel. In this case the existing
2204 		 * objcg vector should be reused.
2205 		 */
2206 		mark_obj_codetag_empty(vec);
2207 		if (unlikely(!allow_spin))
2208 			kfree_nolock(vec);
2209 		else
2210 			kfree(vec);
2211 		return 0;
2212 	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
2213 		/* Retry if a racing thread changed slab->obj_exts from under us. */
2214 		goto retry;
2215 	}
2216 
2217 	if (allow_spin)
2218 		kmemleak_not_leak(vec);
2219 	return 0;
2220 }
2221 
2222 static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
2223 {
2224 	struct slabobj_ext *obj_exts;
2225 
2226 	obj_exts = (struct slabobj_ext *)slab_obj_exts(slab);
2227 	if (!obj_exts) {
2228 		/*
2229 		 * If obj_exts allocation failed, slab->obj_exts is set to
2230 		 * OBJEXTS_ALLOC_FAIL. In this case, we end up here and should
2231 		 * clear the flag.
2232 		 */
2233 		slab->obj_exts = 0;
2234 		return;
2235 	}
2236 
2237 	if (obj_exts_in_slab(slab->slab_cache, slab)) {
2238 		slab->obj_exts = 0;
2239 		return;
2240 	}
2241 
2242 	/*
2243 	 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its
2244 	 * corresponding extension will be NULL. alloc_tag_sub() will throw a
2245 	 * warning if slab has extensions but the extension of an object is
2246 	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
2247 	 * the extension for obj_exts is expected to be NULL.
2248 	 */
2249 	mark_obj_codetag_empty(obj_exts);
2250 	if (allow_spin)
2251 		kfree(obj_exts);
2252 	else
2253 		kfree_nolock(obj_exts);
2254 	slab->obj_exts = 0;
2255 }
2256 
2257 /*
2258  * Try to allocate the slabobj_ext array from unused space within the slab.
2259  * This function must be called on a freshly allocated slab to prevent
2260  * concurrency problems.
2261  */
2262 static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
2263 {
2264 	void *addr;
2265 	unsigned long obj_exts;
2266 
2267 	/* Initialize stride early to avoid memory ordering issues */
2268 	slab_set_stride(slab, sizeof(struct slabobj_ext));
2269 
2270 	if (!need_slab_obj_exts(s))
2271 		return;
2272 
2273 	if (obj_exts_fit_within_slab_leftover(s, slab)) {
2274 		addr = slab_address(slab) + obj_exts_offset_in_slab(s, slab);
2275 		addr = kasan_reset_tag(addr);
2276 		obj_exts = (unsigned long)addr;
2277 
2278 		get_slab_obj_exts(obj_exts);
2279 		memset(addr, 0, obj_exts_size_in_slab(slab));
2280 		put_slab_obj_exts(obj_exts);
2281 
2282 #ifdef CONFIG_MEMCG
2283 		obj_exts |= MEMCG_DATA_OBJEXTS;
2284 #endif
2285 		slab->obj_exts = obj_exts;
2286 	} else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
2287 		unsigned int offset = obj_exts_offset_in_object(s);
2288 
2289 		obj_exts = (unsigned long)slab_address(slab);
2290 		obj_exts += s->red_left_pad;
2291 		obj_exts += offset;
2292 
2293 		get_slab_obj_exts(obj_exts);
2294 		for_each_object(addr, s, slab_address(slab), slab->objects)
2295 			memset(kasan_reset_tag(addr) + offset, 0,
2296 			       sizeof(struct slabobj_ext));
2297 		put_slab_obj_exts(obj_exts);
2298 
2299 #ifdef CONFIG_MEMCG
2300 		obj_exts |= MEMCG_DATA_OBJEXTS;
2301 #endif
2302 		slab->obj_exts = obj_exts;
2303 		slab_set_stride(slab, s->size);
2304 	}
2305 }
2306 
2307 #else /* CONFIG_SLAB_OBJ_EXT */
2308 
2309 static inline void mark_obj_codetag_empty(const void *obj)
2310 {
2311 }
2312 
2313 static inline void init_slab_obj_exts(struct slab *slab)
2314 {
2315 }
2316 
2317 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2318 			       gfp_t gfp, bool new_slab)
2319 {
2320 	return 0;
2321 }
2322 
2323 static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
2324 {
2325 }
2326 
2327 static inline void alloc_slab_obj_exts_early(struct kmem_cache *s,
2328 						       struct slab *slab)
2329 {
2330 }
2331 
2332 #endif /* CONFIG_SLAB_OBJ_EXT */
2333 
2334 #ifdef CONFIG_MEM_ALLOC_PROFILING
2335 
2336 static inline unsigned long
2337 prepare_slab_obj_exts_hook(struct kmem_cache *s, struct slab *slab,
2338 			   gfp_t flags, void *p)
2339 {
2340 	if (!slab_obj_exts(slab) &&
2341 	    alloc_slab_obj_exts(slab, s, flags, false)) {
2342 		pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
2343 			     __func__, s->name);
2344 		return 0;
2345 	}
2346 
2347 	return slab_obj_exts(slab);
2348 }
2349 
2350 
2351 /* Should be called only if mem_alloc_profiling_enabled() */
2352 static noinline void
2353 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2354 {
2355 	unsigned long obj_exts;
2356 	struct slabobj_ext *obj_ext;
2357 	struct slab *slab;
2358 
2359 	if (!object)
2360 		return;
2361 
2362 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2363 		return;
2364 
2365 	if (flags & __GFP_NO_OBJ_EXT)
2366 		return;
2367 
2368 	slab = virt_to_slab(object);
2369 	obj_exts = prepare_slab_obj_exts_hook(s, slab, flags, object);
2370 	/*
2371 	 * Currently obj_exts is used only for allocation profiling.
2372 	 * If other users appear then mem_alloc_profiling_enabled()
2373 	 * check should be added before alloc_tag_add().
2374 	 */
2375 	if (obj_exts) {
2376 		unsigned int obj_idx = obj_to_index(s, slab, object);
2377 
2378 		get_slab_obj_exts(obj_exts);
2379 		obj_ext = slab_obj_ext(slab, obj_exts, obj_idx);
2380 		alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
2381 		put_slab_obj_exts(obj_exts);
2382 	} else {
2383 		alloc_tag_set_inaccurate(current->alloc_tag);
2384 	}
2385 }
2386 
2387 static inline void
2388 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2389 {
2390 	if (mem_alloc_profiling_enabled())
2391 		__alloc_tagging_slab_alloc_hook(s, object, flags);
2392 }
2393 
2394 /* Should be called only if mem_alloc_profiling_enabled() */
2395 static noinline void
2396 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2397 			       int objects)
2398 {
2399 	int i;
2400 	unsigned long obj_exts;
2401 
2402 	/* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
2403 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2404 		return;
2405 
2406 	obj_exts = slab_obj_exts(slab);
2407 	if (!obj_exts)
2408 		return;
2409 
2410 	get_slab_obj_exts(obj_exts);
2411 	for (i = 0; i < objects; i++) {
2412 		unsigned int off = obj_to_index(s, slab, p[i]);
2413 
2414 		alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
2415 	}
2416 	put_slab_obj_exts(obj_exts);
2417 }
2418 
2419 static inline void
2420 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2421 			     int objects)
2422 {
2423 	if (mem_alloc_profiling_enabled())
2424 		__alloc_tagging_slab_free_hook(s, slab, p, objects);
2425 }
2426 
2427 #else /* CONFIG_MEM_ALLOC_PROFILING */
2428 
2429 static inline void
2430 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2431 {
2432 }
2433 
2434 static inline void
2435 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2436 			     int objects)
2437 {
2438 }
2439 
2440 #endif /* CONFIG_MEM_ALLOC_PROFILING */
2441 
2442 
2443 #ifdef CONFIG_MEMCG
2444 
2445 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
2446 
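/*
 * Charge the newly allocated objects to the current memcg when either the
 * allocation (__GFP_ACCOUNT) or the cache (SLAB_ACCOUNT) requests it. On
 * charge failure the objects are freed again and false is returned.
 */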
2447 static __fastpath_inline
2448 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2449 				gfp_t flags, size_t size, void **p)
2450 {
2451 	if (likely(!memcg_kmem_online()))
2452 		return true;
2453 
2454 	if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
2455 		return true;
2456 
2457 	if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
2458 		return true;
2459 
2460 	if (likely(size == 1)) {
2461 		memcg_alloc_abort_single(s, *p);
2462 		*p = NULL;
2463 	} else {
2464 		kmem_cache_free_bulk(s, size, p);
2465 	}
2466 
2467 	return false;
2468 }
2469 
2470 static __fastpath_inline
2471 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2472 			  int objects)
2473 {
2474 	unsigned long obj_exts;
2475 
2476 	if (!memcg_kmem_online())
2477 		return;
2478 
2479 	obj_exts = slab_obj_exts(slab);
2480 	if (likely(!obj_exts))
2481 		return;
2482 
2483 	get_slab_obj_exts(obj_exts);
2484 	__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
2485 	put_slab_obj_exts(obj_exts);
2486 }
2487 
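/*
 * Charge an already allocated object to the current memcg. Handles both
 * large kmalloc pages and slab objects; objects that are already charged or
 * must not be charged (KMALLOC_NORMAL) are left alone. Returns false only
 * when charging fails.
 */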
2488 static __fastpath_inline
2489 bool memcg_slab_post_charge(void *p, gfp_t flags)
2490 {
2491 	unsigned long obj_exts;
2492 	struct slabobj_ext *obj_ext;
2493 	struct kmem_cache *s;
2494 	struct page *page;
2495 	struct slab *slab;
2496 	unsigned long off;
2497 
2498 	page = virt_to_page(p);
2499 	if (PageLargeKmalloc(page)) {
2500 		unsigned int order;
2501 		int size;
2502 
2503 		if (PageMemcgKmem(page))
2504 			return true;
2505 
2506 		order = large_kmalloc_order(page);
2507 		if (__memcg_kmem_charge_page(page, flags, order))
2508 			return false;
2509 
2510 		/*
2511 		 * This page has already been accounted in the global stats but
2512 		 * not in the memcg stats. So, subtract from the global and use
2513 		 * the interface which adds to both global and memcg stats.
2514 		 */
2515 		size = PAGE_SIZE << order;
2516 		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, -size);
2517 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, size);
2518 		return true;
2519 	}
2520 
2521 	slab = page_slab(page);
2522 	s = slab->slab_cache;
2523 
2524 	/*
2525 	 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency
2526 	 * of slab_obj_exts being allocated from the same slab and thus the slab
2527 	 * becoming effectively unfreeable.
2528 	 */
2529 	if (is_kmalloc_normal(s))
2530 		return true;
2531 
2532 	/* Ignore already charged objects. */
2533 	obj_exts = slab_obj_exts(slab);
2534 	if (obj_exts) {
2535 		get_slab_obj_exts(obj_exts);
2536 		off = obj_to_index(s, slab, p);
2537 		obj_ext = slab_obj_ext(slab, obj_exts, off);
2538 		if (unlikely(obj_ext->objcg)) {
2539 			put_slab_obj_exts(obj_exts);
2540 			return true;
2541 		}
2542 		put_slab_obj_exts(obj_exts);
2543 	}
2544 
2545 	return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
2546 }
2547 
2548 #else /* CONFIG_MEMCG */
2549 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
2550 					      struct list_lru *lru,
2551 					      gfp_t flags, size_t size,
2552 					      void **p)
2553 {
2554 	return true;
2555 }
2556 
2557 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2558 					void **p, int objects)
2559 {
2560 }
2561 
2562 static inline bool memcg_slab_post_charge(void *p, gfp_t flags)
2563 {
2564 	return true;
2565 }
2566 #endif /* CONFIG_MEMCG */
2567 
2568 #ifdef CONFIG_SLUB_RCU_DEBUG
2569 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
2570 
2571 struct rcu_delayed_free {
2572 	struct rcu_head head;
2573 	void *object;
2574 };
2575 #endif
2576 
2577 /*
2578  * Hooks for other subsystems that check memory allocations. In a typical
2579  * production configuration these hooks all should produce no code at all.
2580  *
2581  * Returns true if freeing of the object can proceed, false if its reuse
2582  * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
2583  * to KFENCE.
2584  *
2585  * For objects allocated via kmalloc_nolock(), only a subset of alloc hooks
2586  * are invoked, so some free hooks must handle asymmetric hook calls.
2587  *
2588  * Alloc hooks called for kmalloc_nolock():
2589  * - kmsan_slab_alloc()
2590  * - kasan_slab_alloc()
2591  * - memcg_slab_post_alloc_hook()
2592  * - alloc_tagging_slab_alloc_hook()
2593  *
2594  * Free hooks that must handle missing corresponding alloc hooks:
2595  * - kmemleak_free_recursive()
2596  * - kfence_free()
2597  *
2598  * Free hooks that have no alloc hook counterpart, and thus safe to call:
2599  * - debug_check_no_locks_freed()
2600  * - debug_check_no_obj_freed()
2601  * - __kcsan_check_access()
2602  */
2603 static __always_inline
2604 bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
2605 		    bool after_rcu_delay)
2606 {
2607 	/* Are the object contents still accessible? */
2608 	bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
2609 
2610 	kmemleak_free_recursive(x, s->flags);
2611 	kmsan_slab_free(s, x);
2612 
2613 	debug_check_no_locks_freed(x, s->object_size);
2614 
2615 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
2616 		debug_check_no_obj_freed(x, s->object_size);
2617 
2618 	/* Use KCSAN to help debug racy use-after-free. */
2619 	if (!still_accessible)
2620 		__kcsan_check_access(x, s->object_size,
2621 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
2622 
2623 	if (kfence_free(x))
2624 		return false;
2625 
2626 	/*
2627 	 * Give KASAN a chance to notice an invalid free operation before we
2628 	 * modify the object.
2629 	 */
2630 	if (kasan_slab_pre_free(s, x))
2631 		return false;
2632 
2633 #ifdef CONFIG_SLUB_RCU_DEBUG
2634 	if (still_accessible) {
2635 		struct rcu_delayed_free *delayed_free;
2636 
2637 		delayed_free = kmalloc_obj(*delayed_free, GFP_NOWAIT);
2638 		if (delayed_free) {
2639 			/*
2640 			 * Let KASAN track our call stack as a "related work
2641 			 * creation", just like if the object had been freed
2642 			 * normally via kfree_rcu().
2643 			 * We have to do this manually because the rcu_head is
2644 			 * not located inside the object.
2645 			 */
2646 			kasan_record_aux_stack(x);
2647 
2648 			delayed_free->object = x;
2649 			call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
2650 			return false;
2651 		}
2652 	}
2653 #endif /* CONFIG_SLUB_RCU_DEBUG */
2654 
2655 	/*
2656 	 * As memory initialization might be integrated into KASAN,
2657 	 * kasan_slab_free and initialization memsets must be
2658 	 * kept together to avoid discrepancies in behavior.
2659 	 *
2660 	 * The initialization memsets clear the object and the metadata,
2661 	 * but don't touch the SLAB redzone.
2662 	 *
2663 	 * The object's freepointer is also left untouched if it is stored
2664 	 * outside the object.
2665 	 */
2666 	if (unlikely(init)) {
2667 		int rsize;
2668 		unsigned int inuse, orig_size;
2669 
2670 		inuse = get_info_end(s);
2671 		orig_size = get_orig_size(s, x);
2672 		if (!kasan_has_integrated_init())
2673 			memset(kasan_reset_tag(x), 0, orig_size);
2674 		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2675 		memset((char *)kasan_reset_tag(x) + inuse, 0,
2676 		       s->size - inuse - rsize);
2677 		/*
2678 		 * Restore orig_size, otherwise an overwritten kmalloc redzone
2679 		 * would be reported
2680 		 */
2681 		set_orig_size(s, x, orig_size);
2682 
2683 	}
2684 	/* KASAN might put x into memory quarantine, delaying its reuse. */
2685 	return !kasan_slab_free(s, x, init, still_accessible, false);
2686 }
2687 
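/*
 * Run the free hooks for all objects on the detached freelist between *head
 * and *tail, and rebuild the freelist from the objects whose reuse is not
 * delayed, updating *cnt accordingly. Returns false if nothing remains to be
 * freed to the allocator.
 */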
2688 static __fastpath_inline
2689 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
2690 			     int *cnt)
2691 {
2693 	void *object;
2694 	void *next = *head;
2695 	void *old_tail = *tail;
2696 	bool init;
2697 
2698 	if (is_kfence_address(next)) {
2699 		slab_free_hook(s, next, false, false);
2700 		return false;
2701 	}
2702 
2703 	/* Head and tail of the reconstructed freelist */
2704 	*head = NULL;
2705 	*tail = NULL;
2706 
2707 	init = slab_want_init_on_free(s);
2708 
2709 	do {
2710 		object = next;
2711 		next = get_freepointer(s, object);
2712 
2713 		/* If object's reuse doesn't have to be delayed */
2714 		if (likely(slab_free_hook(s, object, init, false))) {
2715 			/* Move object to the new freelist */
2716 			set_freepointer(s, object, *head);
2717 			*head = object;
2718 			if (!*tail)
2719 				*tail = object;
2720 		} else {
2721 			/*
2722 			 * Adjust the reconstructed freelist depth
2723 			 * accordingly if object's reuse is delayed.
2724 			 */
2725 			--(*cnt);
2726 		}
2727 	} while (object != old_tail);
2728 
2729 	return *head != NULL;
2730 }
2731 
2732 static void *setup_object(struct kmem_cache *s, void *object)
2733 {
2734 	setup_object_debug(s, object);
2735 	object = kasan_init_slab_obj(s, object);
2736 	if (unlikely(s->ctor)) {
2737 		kasan_unpoison_new_object(s, object);
2738 		s->ctor(object);
2739 		kasan_poison_new_object(s, object);
2740 	}
2741 	return object;
2742 }
2743 
2744 static struct slab_sheaf *__alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp,
2745 					      unsigned int capacity)
2746 {
2747 	struct slab_sheaf *sheaf;
2748 	size_t sheaf_size;
2749 
2750 	if (gfp & __GFP_NO_OBJ_EXT)
2751 		return NULL;
2752 
2753 	gfp &= ~OBJCGS_CLEAR_MASK;
2754 
2755 	/*
2756 	 * Prevent recursion to the same cache, or a deep stack of kmallocs of
2757 	 * varying sizes (sheaf capacity might differ for each kmalloc size
2758 	 * bucket)
2759 	 */
2760 	if (s->flags & SLAB_KMALLOC)
2761 		gfp |= __GFP_NO_OBJ_EXT;
2762 
2763 	sheaf_size = struct_size(sheaf, objects, capacity);
2764 	sheaf = kzalloc(sheaf_size, gfp);
2765 
2766 	if (unlikely(!sheaf))
2767 		return NULL;
2768 
2769 	sheaf->cache = s;
2770 
2771 	stat(s, SHEAF_ALLOC);
2772 
2773 	return sheaf;
2774 }
2775 
2776 static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
2777 						   gfp_t gfp)
2778 {
2779 	return __alloc_empty_sheaf(s, gfp, s->sheaf_capacity);
2780 }
2781 
2782 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
2783 {
2784 	/*
2785 	 * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
2786 	 * corresponding extension is NULL and alloc_tag_sub() will throw a
2787 	 * warning, therefore replace NULL with CODETAG_EMPTY to indicate
2788 	 * that the extension for this sheaf is expected to be NULL.
2789 	 */
2790 	if (s->flags & SLAB_KMALLOC)
2791 		mark_obj_codetag_empty(sheaf);
2792 
2793 	VM_WARN_ON_ONCE(sheaf->size > 0);
2794 	kfree(sheaf);
2795 
2796 	stat(s, SHEAF_FREE);
2797 }
2798 
2799 static unsigned int
2800 refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
2801 	       unsigned int max);
2802 
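/*
 * Fill the sheaf up to the cache's sheaf capacity. Returns 0 when the sheaf
 * is full, or -ENOMEM when fewer objects than needed could be obtained; the
 * sheaf is then left partially filled.
 */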
2803 static int refill_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf,
2804 			 gfp_t gfp)
2805 {
2806 	int to_fill = s->sheaf_capacity - sheaf->size;
2807 	int filled;
2808 
2809 	if (!to_fill)
2810 		return 0;
2811 
2812 	filled = refill_objects(s, &sheaf->objects[sheaf->size], gfp, to_fill,
2813 				to_fill);
2814 
2815 	sheaf->size += filled;
2816 
2817 	stat_add(s, SHEAF_REFILL, filled);
2818 
2819 	if (filled < to_fill)
2820 		return -ENOMEM;
2821 
2822 	return 0;
2823 }
2824 
2825 static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf);
2826 
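/*
 * Allocate a new sheaf and fill it completely with objects. On refill
 * failure the partially filled sheaf is flushed and freed, and NULL is
 * returned.
 */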
2827 static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
2828 {
2829 	struct slab_sheaf *sheaf = alloc_empty_sheaf(s, gfp);
2830 
2831 	if (!sheaf)
2832 		return NULL;
2833 
2834 	if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
2835 		sheaf_flush_unused(s, sheaf);
2836 		free_empty_sheaf(s, sheaf);
2837 		return NULL;
2838 	}
2839 
2840 	return sheaf;
2841 }
2842 
2843 /*
2844  * Maximum number of objects freed during a single flush of the main pcs sheaf.
2845  * Translates directly to an on-stack array size.
2846  */
2847 #define PCS_BATCH_MAX	32U
2848 
2849 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
2850 
2851 /*
2852  * Free all objects from the main sheaf. In order to perform
2853  * __kmem_cache_free_bulk() outside of cpu_sheaves->lock, work in batches where
2854  * object pointers are moved to an on-stack array under the lock. To bound the
2855  * stack usage, limit each batch to PCS_BATCH_MAX.
2856  *
2857  * Must be called with s->cpu_sheaves->lock locked, returns with the lock
2858  * unlocked.
2859  *
2860  * Returns the number of objects remaining to be flushed.
2861  */
2862 static unsigned int __sheaf_flush_main_batch(struct kmem_cache *s)
2863 {
2864 	struct slub_percpu_sheaves *pcs;
2865 	unsigned int batch, remaining;
2866 	void *objects[PCS_BATCH_MAX];
2867 	struct slab_sheaf *sheaf;
2868 
2869 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
2870 
2871 	pcs = this_cpu_ptr(s->cpu_sheaves);
2872 	sheaf = pcs->main;
2873 
2874 	batch = min(PCS_BATCH_MAX, sheaf->size);
2875 
2876 	sheaf->size -= batch;
2877 	memcpy(objects, sheaf->objects + sheaf->size, batch * sizeof(void *));
2878 
2879 	remaining = sheaf->size;
2880 
2881 	local_unlock(&s->cpu_sheaves->lock);
2882 
2883 	__kmem_cache_free_bulk(s, batch, &objects[0]);
2884 
2885 	stat_add(s, SHEAF_FLUSH, batch);
2886 
2887 	return remaining;
2888 }
2889 
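/*
 * Flush the local cpu's main sheaf completely, in batches of at most
 * PCS_BATCH_MAX objects.
 */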
2890 static void sheaf_flush_main(struct kmem_cache *s)
2891 {
2892 	unsigned int remaining;
2893 
2894 	do {
2895 		local_lock(&s->cpu_sheaves->lock);
2896 
2897 		remaining = __sheaf_flush_main_batch(s);
2898 
2899 	} while (remaining);
2900 }
2901 
2902 /*
2903  * Returns true if the main sheaf was at least partially flushed.
2904  */
2905 static bool sheaf_try_flush_main(struct kmem_cache *s)
2906 {
2907 	unsigned int remaining;
2908 	bool ret = false;
2909 
2910 	do {
2911 		if (!local_trylock(&s->cpu_sheaves->lock))
2912 			return ret;
2913 
2914 		ret = true;
2915 		remaining = __sheaf_flush_main_batch(s);
2916 
2917 	} while (remaining);
2918 
2919 	return ret;
2920 }
2921 
2922 /*
2923  * Free all objects from a sheaf that's unused, i.e. not linked to any
2924  * cpu_sheaves, so no locking or batching is needed. Locking is also not
2925  * necessary when flushing a cpu's sheaves (both spare and main) during cpu
2926  * hotremove as the cpu is not executing anymore.
2927  */
2928 static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf)
2929 {
2930 	if (!sheaf->size)
2931 		return;
2932 
2933 	stat_add(s, SHEAF_FLUSH, sheaf->size);
2934 
2935 	__kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
2936 
2937 	sheaf->size = 0;
2938 }
2939 
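/*
 * Run the free hooks for all objects in a sheaf whose RCU grace period has
 * elapsed. Objects whose freeing is delayed further (e.g. by KASAN
 * quarantine) are dropped from the sheaf. Returns true if any object's slab
 * was pfmemalloc.
 */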
2940 static bool __rcu_free_sheaf_prepare(struct kmem_cache *s,
2941 				     struct slab_sheaf *sheaf)
2942 {
2943 	bool init = slab_want_init_on_free(s);
2944 	void **p = &sheaf->objects[0];
2945 	unsigned int i = 0;
2946 	bool pfmemalloc = false;
2947 
2948 	while (i < sheaf->size) {
2949 		struct slab *slab = virt_to_slab(p[i]);
2950 
2951 		memcg_slab_free_hook(s, slab, p + i, 1);
2952 		alloc_tagging_slab_free_hook(s, slab, p + i, 1);
2953 
2954 		if (unlikely(!slab_free_hook(s, p[i], init, true))) {
2955 			p[i] = p[--sheaf->size];
2956 			continue;
2957 		}
2958 
2959 		if (slab_test_pfmemalloc(slab))
2960 			pfmemalloc = true;
2961 
2962 		i++;
2963 	}
2964 
2965 	return pfmemalloc;
2966 }
2967 
2968 static void rcu_free_sheaf_nobarn(struct rcu_head *head)
2969 {
2970 	struct slab_sheaf *sheaf;
2971 	struct kmem_cache *s;
2972 
2973 	sheaf = container_of(head, struct slab_sheaf, rcu_head);
2974 	s = sheaf->cache;
2975 
2976 	__rcu_free_sheaf_prepare(s, sheaf);
2977 
2978 	sheaf_flush_unused(s, sheaf);
2979 
2980 	free_empty_sheaf(s, sheaf);
2981 }
2982 
2983 /*
2984  * The caller needs to make sure migration is disabled in order to fully
2985  * flush a single cpu's sheaves.
2986  *
2987  * Must not be called from an irq.
2988  *
2989  * Flushing operations are rare, so keep it simple and flush to slabs
2990  * directly, skipping the barn.
2991  */
2992 static void pcs_flush_all(struct kmem_cache *s)
2993 {
2994 	struct slub_percpu_sheaves *pcs;
2995 	struct slab_sheaf *spare, *rcu_free;
2996 
2997 	local_lock(&s->cpu_sheaves->lock);
2998 	pcs = this_cpu_ptr(s->cpu_sheaves);
2999 
3000 	spare = pcs->spare;
3001 	pcs->spare = NULL;
3002 
3003 	rcu_free = pcs->rcu_free;
3004 	pcs->rcu_free = NULL;
3005 
3006 	local_unlock(&s->cpu_sheaves->lock);
3007 
3008 	if (spare) {
3009 		sheaf_flush_unused(s, spare);
3010 		free_empty_sheaf(s, spare);
3011 	}
3012 
3013 	if (rcu_free)
3014 		call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
3015 
3016 	sheaf_flush_main(s);
3017 }
3018 
3019 static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu)
3020 {
3021 	struct slub_percpu_sheaves *pcs;
3022 
3023 	pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3024 
3025 	/* The cpu is not executing anymore so we don't need pcs->lock */
3026 	sheaf_flush_unused(s, pcs->main);
3027 	if (pcs->spare) {
3028 		sheaf_flush_unused(s, pcs->spare);
3029 		free_empty_sheaf(s, pcs->spare);
3030 		pcs->spare = NULL;
3031 	}
3032 
3033 	if (pcs->rcu_free) {
3034 		call_rcu(&pcs->rcu_free->rcu_head, rcu_free_sheaf_nobarn);
3035 		pcs->rcu_free = NULL;
3036 	}
3037 }
3038 
3039 static void pcs_destroy(struct kmem_cache *s)
3040 {
3041 	int cpu;
3042 
3043 	/*
3044 	 * We may be unwinding a cache creation that failed before or during
3045 	 * the allocation of s->cpu_sheaves.
3046 	 */
3047 	if (!s->cpu_sheaves)
3048 		return;
3049 
3050 	/* pcs->main can only point to the bootstrap sheaf, nothing to free */
3051 	if (!cache_has_sheaves(s))
3052 		goto free_pcs;
3053 
3054 	for_each_possible_cpu(cpu) {
3055 		struct slub_percpu_sheaves *pcs;
3056 
3057 		pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3058 
3059 		/* This can happen when unwinding failed cache creation. */
3060 		if (!pcs->main)
3061 			continue;
3062 
3063 		/*
3064 		 * We have already passed __kmem_cache_shutdown() so everything
3065 		 * was flushed and there should be no objects allocated from
3066 		 * slabs, otherwise kmem_cache_destroy() would have aborted.
3067 		 * Therefore something would have to be really wrong if the
3068 		 * warnings here trigger, and we should rather leave objects and
3069 		 * sheaves to leak in that case.
3070 		 */
3071 
3072 		WARN_ON(pcs->spare);
3073 		WARN_ON(pcs->rcu_free);
3074 
3075 		if (!WARN_ON(pcs->main->size)) {
3076 			free_empty_sheaf(s, pcs->main);
3077 			pcs->main = NULL;
3078 		}
3079 	}
3080 
3081 free_pcs:
3082 	free_percpu(s->cpu_sheaves);
3083 	s->cpu_sheaves = NULL;
3084 }
3085 
3086 static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn,
3087 					       bool allow_spin)
3088 {
3089 	struct slab_sheaf *empty = NULL;
3090 	unsigned long flags;
3091 
3092 	if (!data_race(barn->nr_empty))
3093 		return NULL;
3094 
3095 	if (likely(allow_spin))
3096 		spin_lock_irqsave(&barn->lock, flags);
3097 	else if (!spin_trylock_irqsave(&barn->lock, flags))
3098 		return NULL;
3099 
3100 	if (likely(barn->nr_empty)) {
3101 		empty = list_first_entry(&barn->sheaves_empty,
3102 					 struct slab_sheaf, barn_list);
3103 		list_del(&empty->barn_list);
3104 		barn->nr_empty--;
3105 	}
3106 
3107 	spin_unlock_irqrestore(&barn->lock, flags);
3108 
3109 	return empty;
3110 }
3111 
3112 /*
3113  * The following two functions are used mainly in cases where we have to undo an
3114  * intended action due to a race or cpu migration. Thus they do not check the
3115  * empty or full sheaf limits for simplicity.
3116  */
3117 
3118 static void barn_put_empty_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
3119 {
3120 	unsigned long flags;
3121 
3122 	spin_lock_irqsave(&barn->lock, flags);
3123 
3124 	list_add(&sheaf->barn_list, &barn->sheaves_empty);
3125 	barn->nr_empty++;
3126 
3127 	spin_unlock_irqrestore(&barn->lock, flags);
3128 }
3129 
3130 static void barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
3131 {
3132 	unsigned long flags;
3133 
3134 	spin_lock_irqsave(&barn->lock, flags);
3135 
3136 	list_add(&sheaf->barn_list, &barn->sheaves_full);
3137 	barn->nr_full++;
3138 
3139 	spin_unlock_irqrestore(&barn->lock, flags);
3140 }
3141 
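/*
 * Get a full sheaf from the barn if one is available, otherwise fall back to
 * an empty one. Returns NULL when the barn has neither.
 */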
3142 static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
3143 {
3144 	struct slab_sheaf *sheaf = NULL;
3145 	unsigned long flags;
3146 
3147 	if (!data_race(barn->nr_full) && !data_race(barn->nr_empty))
3148 		return NULL;
3149 
3150 	spin_lock_irqsave(&barn->lock, flags);
3151 
3152 	if (barn->nr_full) {
3153 		sheaf = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
3154 					barn_list);
3155 		list_del(&sheaf->barn_list);
3156 		barn->nr_full--;
3157 	} else if (barn->nr_empty) {
3158 		sheaf = list_first_entry(&barn->sheaves_empty,
3159 					 struct slab_sheaf, barn_list);
3160 		list_del(&sheaf->barn_list);
3161 		barn->nr_empty--;
3162 	}
3163 
3164 	spin_unlock_irqrestore(&barn->lock, flags);
3165 
3166 	return sheaf;
3167 }
3168 
3169 /*
3170  * If a full sheaf is available, return it and put the supplied empty one in
3171  * the barn. We ignore the limit on empty sheaves as the number of sheaves doesn't
3172  * change.
3173  */
3174 static struct slab_sheaf *
3175 barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty,
3176 			 bool allow_spin)
3177 {
3178 	struct slab_sheaf *full = NULL;
3179 	unsigned long flags;
3180 
3181 	if (!data_race(barn->nr_full))
3182 		return NULL;
3183 
3184 	if (likely(allow_spin))
3185 		spin_lock_irqsave(&barn->lock, flags);
3186 	else if (!spin_trylock_irqsave(&barn->lock, flags))
3187 		return NULL;
3188 
3189 	if (likely(barn->nr_full)) {
3190 		full = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
3191 					barn_list);
3192 		list_del(&full->barn_list);
3193 		list_add(&empty->barn_list, &barn->sheaves_empty);
3194 		barn->nr_full--;
3195 		barn->nr_empty++;
3196 	}
3197 
3198 	spin_unlock_irqrestore(&barn->lock, flags);
3199 
3200 	return full;
3201 }
3202 
3203 /*
3204  * If an empty sheaf is available, return it and put the supplied full one in
3205  * the barn. But if there are too many full sheaves, reject this with -E2BIG.
3206  */
3207 static struct slab_sheaf *
3208 barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full,
3209 			bool allow_spin)
3210 {
3211 	struct slab_sheaf *empty;
3212 	unsigned long flags;
3213 
3214 	/* we don't repeat this check under barn->lock as it's not critical */
3215 	if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES)
3216 		return ERR_PTR(-E2BIG);
3217 	if (!data_race(barn->nr_empty))
3218 		return ERR_PTR(-ENOMEM);
3219 
3220 	if (likely(allow_spin))
3221 		spin_lock_irqsave(&barn->lock, flags);
3222 	else if (!spin_trylock_irqsave(&barn->lock, flags))
3223 		return ERR_PTR(-EBUSY);
3224 
3225 	if (likely(barn->nr_empty)) {
3226 		empty = list_first_entry(&barn->sheaves_empty, struct slab_sheaf,
3227 					 barn_list);
3228 		list_del(&empty->barn_list);
3229 		list_add(&full->barn_list, &barn->sheaves_full);
3230 		barn->nr_empty--;
3231 		barn->nr_full++;
3232 	} else {
3233 		empty = ERR_PTR(-ENOMEM);
3234 	}
3235 
3236 	spin_unlock_irqrestore(&barn->lock, flags);
3237 
3238 	return empty;
3239 }
3240 
3241 static void barn_init(struct node_barn *barn)
3242 {
3243 	spin_lock_init(&barn->lock);
3244 	INIT_LIST_HEAD(&barn->sheaves_full);
3245 	INIT_LIST_HEAD(&barn->sheaves_empty);
3246 	barn->nr_full = 0;
3247 	barn->nr_empty = 0;
3248 }
3249 
3250 static void barn_shrink(struct kmem_cache *s, struct node_barn *barn)
3251 {
3252 	LIST_HEAD(empty_list);
3253 	LIST_HEAD(full_list);
3254 	struct slab_sheaf *sheaf, *sheaf2;
3255 	unsigned long flags;
3256 
3257 	spin_lock_irqsave(&barn->lock, flags);
3258 
3259 	list_splice_init(&barn->sheaves_full, &full_list);
3260 	barn->nr_full = 0;
3261 	list_splice_init(&barn->sheaves_empty, &empty_list);
3262 	barn->nr_empty = 0;
3263 
3264 	spin_unlock_irqrestore(&barn->lock, flags);
3265 
3266 	list_for_each_entry_safe(sheaf, sheaf2, &full_list, barn_list) {
3267 		sheaf_flush_unused(s, sheaf);
3268 		free_empty_sheaf(s, sheaf);
3269 	}
3270 
3271 	list_for_each_entry_safe(sheaf, sheaf2, &empty_list, barn_list)
3272 		free_empty_sheaf(s, sheaf);
3273 }
3274 
3275 /*
3276  * Slab allocation and freeing
3277  */
3278 static inline struct slab *alloc_slab_page(gfp_t flags, int node,
3279 					   struct kmem_cache_order_objects oo,
3280 					   bool allow_spin)
3281 {
3282 	struct page *page;
3283 	struct slab *slab;
3284 	unsigned int order = oo_order(oo);
3285 
3286 	if (unlikely(!allow_spin))
3287 		page = alloc_frozen_pages_nolock(0/* __GFP_COMP is implied */,
3288 								  node, order);
3289 	else if (node == NUMA_NO_NODE)
3290 		page = alloc_frozen_pages(flags, order);
3291 	else
3292 		page = __alloc_frozen_pages(flags, order, node, NULL);
3293 
3294 	if (!page)
3295 		return NULL;
3296 
3297 	__SetPageSlab(page);
3298 	slab = page_slab(page);
3299 	if (page_is_pfmemalloc(page))
3300 		slab_set_pfmemalloc(slab);
3301 
3302 	return slab;
3303 }
3304 
3305 #ifdef CONFIG_SLAB_FREELIST_RANDOM
3306 /* Pre-initialize the random sequence cache */
3307 static int init_cache_random_seq(struct kmem_cache *s)
3308 {
3309 	unsigned int count = oo_objects(s->oo);
3310 	int err;
3311 
3312 	/* Bailout if already initialised */
3313 	if (s->random_seq)
3314 		return 0;
3315 
3316 	err = cache_random_seq_create(s, count, GFP_KERNEL);
3317 	if (err) {
3318 		pr_err("SLUB: Unable to initialize free list for %s\n",
3319 			s->name);
3320 		return err;
3321 	}
3322 
3323 	/* Transform to an offset on the set of pages */
3324 	if (s->random_seq) {
3325 		unsigned int i;
3326 
3327 		for (i = 0; i < count; i++)
3328 			s->random_seq[i] *= s->size;
3329 	}
3330 	return 0;
3331 }
3332 
3333 /* Initialize each random sequence freelist per cache */
3334 static void __init init_freelist_randomization(void)
3335 {
3336 	struct kmem_cache *s;
3337 
3338 	mutex_lock(&slab_mutex);
3339 
3340 	list_for_each_entry(s, &slab_caches, list)
3341 		init_cache_random_seq(s);
3342 
3343 	mutex_unlock(&slab_mutex);
3344 }
3345 
3346 /* Get the next entry on the pre-computed freelist randomized */
3347 static void *next_freelist_entry(struct kmem_cache *s,
3348 				unsigned long *pos, void *start,
3349 				unsigned long page_limit,
3350 				unsigned long freelist_count)
3351 {
3352 	unsigned int idx;
3353 
3354 	/*
3355 	 * If the target page allocation failed, the number of objects on the
3356 	 * page might be smaller than the usual size defined by the cache.
3357 	 */
3358 	do {
3359 		idx = s->random_seq[*pos];
3360 		*pos += 1;
3361 		if (*pos >= freelist_count)
3362 			*pos = 0;
3363 	} while (unlikely(idx >= page_limit));
3364 
3365 	return (char *)start + idx;
3366 }
3367 
3368 static DEFINE_PER_CPU(struct rnd_state, slab_rnd_state);
3369 
3370 /* Shuffle the single linked freelist based on a random pre-computed sequence */
3371 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
3372 			     bool allow_spin)
3373 {
3374 	void *start;
3375 	void *cur;
3376 	void *next;
3377 	unsigned long idx, pos, page_limit, freelist_count;
3378 
3379 	if (slab->objects < 2 || !s->random_seq)
3380 		return false;
3381 
3382 	freelist_count = oo_objects(s->oo);
3383 	if (allow_spin) {
3384 		pos = get_random_u32_below(freelist_count);
3385 	} else {
3386 		struct rnd_state *state;
3387 
3388 		/*
3389 		 * An interrupt or NMI handler might interrupt and change
3390 		 * the state in the middle, but that's safe.
3391 		 */
3392 		state = &get_cpu_var(slab_rnd_state);
3393 		pos = prandom_u32_state(state) % freelist_count;
3394 		put_cpu_var(slab_rnd_state);
3395 	}
3396 
3397 	page_limit = slab->objects * s->size;
3398 	start = fixup_red_left(s, slab_address(slab));
3399 
3400 	/* First entry is used as the base of the freelist */
3401 	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
3402 	cur = setup_object(s, cur);
3403 	slab->freelist = cur;
3404 
3405 	for (idx = 1; idx < slab->objects; idx++) {
3406 		next = next_freelist_entry(s, &pos, start, page_limit,
3407 			freelist_count);
3408 		next = setup_object(s, next);
3409 		set_freepointer(s, cur, next);
3410 		cur = next;
3411 	}
3412 	set_freepointer(s, cur, NULL);
3413 
3414 	return true;
3415 }
3416 #else
3417 static inline int init_cache_random_seq(struct kmem_cache *s)
3418 {
3419 	return 0;
3420 }
3421 static inline void init_freelist_randomization(void) { }
3422 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
3423 				    bool allow_spin)
3424 {
3425 	return false;
3426 }
3427 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
3428 
3429 static __always_inline void account_slab(struct slab *slab, int order,
3430 					 struct kmem_cache *s, gfp_t gfp)
3431 {
3432 	if (memcg_kmem_online() &&
3433 			(s->flags & SLAB_ACCOUNT) &&
3434 			!slab_obj_exts(slab))
3435 		alloc_slab_obj_exts(slab, s, gfp, true);
3436 
3437 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
3438 			    PAGE_SIZE << order);
3439 }
3440 
3441 static __always_inline void unaccount_slab(struct slab *slab, int order,
3442 					   struct kmem_cache *s, bool allow_spin)
3443 {
3444 	/*
3445 	 * The slab object extensions should now be freed regardless of
3446 	 * whether mem_alloc_profiling_enabled() is true or not, because profiling
3447 	 * might have been disabled after slab->obj_exts got allocated.
3448 	 */
3449 	free_slab_obj_exts(slab, allow_spin);
3450 
3451 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
3452 			    -(PAGE_SIZE << order));
3453 }
3454 
3455 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
3456 {
3457 	bool allow_spin = gfpflags_allow_spinning(flags);
3458 	struct slab *slab;
3459 	struct kmem_cache_order_objects oo = s->oo;
3460 	gfp_t alloc_gfp;
3461 	void *start, *p, *next;
3462 	int idx;
3463 	bool shuffle;
3464 
3465 	flags &= gfp_allowed_mask;
3466 
3467 	flags |= s->allocflags;
3468 
3469 	/*
3470 	 * Let the initial higher-order allocation fail under memory pressure
3471 	 * so we fall back to the minimum order allocation.
3472 	 */
3473 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
3474 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
3475 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
3476 
3477 	/*
3478 	 * __GFP_RECLAIM could be cleared on the first allocation attempt,
3479 	 * so pass the allow_spin flag directly.
3480 	 */
3481 	slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
3482 	if (unlikely(!slab)) {
3483 		oo = s->min;
3484 		alloc_gfp = flags;
3485 		/*
3486 		 * Allocation may have failed due to fragmentation.
3487 		 * Try a lower order alloc if possible
3488 		 */
3489 		slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
3490 		if (unlikely(!slab))
3491 			return NULL;
3492 		stat(s, ORDER_FALLBACK);
3493 	}
3494 
3495 	slab->objects = oo_objects(oo);
3496 	slab->inuse = 0;
3497 	slab->frozen = 0;
3498 
3499 	slab->slab_cache = s;
3500 
3501 	kasan_poison_slab(slab);
3502 
3503 	start = slab_address(slab);
3504 
3505 	setup_slab_debug(s, slab, start);
3506 	init_slab_obj_exts(slab);
3507 	/*
3508 	 * Poison the slab before initializing the slabobj_ext array
3509 	 * to prevent the array from being overwritten.
3510 	 */
3511 	alloc_slab_obj_exts_early(s, slab);
3512 	account_slab(slab, oo_order(oo), s, flags);
3513 
3514 	shuffle = shuffle_freelist(s, slab, allow_spin);
3515 
3516 	if (!shuffle) {
3517 		start = fixup_red_left(s, start);
3518 		start = setup_object(s, start);
3519 		slab->freelist = start;
3520 		for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
3521 			next = p + s->size;
3522 			next = setup_object(s, next);
3523 			set_freepointer(s, p, next);
3524 			p = next;
3525 		}
3526 		set_freepointer(s, p, NULL);
3527 	}
3528 
3529 	return slab;
3530 }
3531 
3532 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
3533 {
3534 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
3535 		flags = kmalloc_fix_flags(flags);
3536 
3537 	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
3538 
3539 	return allocate_slab(s,
3540 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
3541 }
3542 
3543 static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin)
3544 {
3545 	struct page *page = slab_page(slab);
3546 	int order = compound_order(page);
3547 	int pages = 1 << order;
3548 
3549 	__slab_clear_pfmemalloc(slab);
3550 	page->mapping = NULL;
3551 	__ClearPageSlab(page);
3552 	mm_account_reclaimed_pages(pages);
3553 	unaccount_slab(slab, order, s, allow_spin);
3554 	if (allow_spin)
3555 		free_frozen_pages(page, order);
3556 	else
3557 		free_frozen_pages_nolock(page, order);
3558 }
3559 
3560 static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
3561 {
3562 	/*
3563 	 * Since it was just allocated, we can skip the actions in
3564 	 * discard_slab() and free_slab().
3565 	 */
3566 	__free_slab(s, slab, false);
3567 }
3568 
3569 static void rcu_free_slab(struct rcu_head *h)
3570 {
3571 	struct slab *slab = container_of(h, struct slab, rcu_head);
3572 
3573 	__free_slab(slab->slab_cache, slab, true);
3574 }
3575 
3576 static void free_slab(struct kmem_cache *s, struct slab *slab)
3577 {
3578 	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
3579 		void *p;
3580 
3581 		slab_pad_check(s, slab);
3582 		for_each_object(p, s, slab_address(slab), slab->objects)
3583 			check_object(s, slab, p, SLUB_RED_INACTIVE);
3584 	}
3585 
3586 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
3587 		call_rcu(&slab->rcu_head, rcu_free_slab);
3588 	else
3589 		__free_slab(s, slab, true);
3590 }
3591 
3592 static void discard_slab(struct kmem_cache *s, struct slab *slab)
3593 {
3594 	dec_slabs_node(s, slab_nid(slab), slab->objects);
3595 	free_slab(s, slab);
3596 }
3597 
3598 static inline bool slab_test_node_partial(const struct slab *slab)
3599 {
3600 	return test_bit(SL_partial, &slab->flags.f);
3601 }
3602 
3603 static inline void slab_set_node_partial(struct slab *slab)
3604 {
3605 	set_bit(SL_partial, &slab->flags.f);
3606 }
3607 
3608 static inline void slab_clear_node_partial(struct slab *slab)
3609 {
3610 	clear_bit(SL_partial, &slab->flags.f);
3611 }
3612 
3613 /*
3614  * Management of partially allocated slabs.
3615  */
3616 static inline void
3617 __add_partial(struct kmem_cache_node *n, struct slab *slab, enum add_mode mode)
3618 {
3619 	n->nr_partial++;
3620 	if (mode == ADD_TO_TAIL)
3621 		list_add_tail(&slab->slab_list, &n->partial);
3622 	else
3623 		list_add(&slab->slab_list, &n->partial);
3624 	slab_set_node_partial(slab);
3625 }
3626 
3627 static inline void add_partial(struct kmem_cache_node *n,
3628 				struct slab *slab, enum add_mode mode)
3629 {
3630 	lockdep_assert_held(&n->list_lock);
3631 	__add_partial(n, slab, mode);
3632 }
3633 
3634 static inline void remove_partial(struct kmem_cache_node *n,
3635 					struct slab *slab)
3636 {
3637 	lockdep_assert_held(&n->list_lock);
3638 	list_del(&slab->slab_list);
3639 	slab_clear_node_partial(slab);
3640 	n->nr_partial--;
3641 }
3642 
3643 /*
3644  * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
3645  * slab from the n->partial list. Remove only a single object from the slab, do
3646  * the alloc_debug_processing() checks and leave the slab on the list, or move
3647  * it to the full list if it was the last free object.
3648  */
3649 static void *alloc_single_from_partial(struct kmem_cache *s,
3650 		struct kmem_cache_node *n, struct slab *slab, int orig_size)
3651 {
3652 	void *object;
3653 
3654 	lockdep_assert_held(&n->list_lock);
3655 
3656 #ifdef CONFIG_SLUB_DEBUG
3657 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3658 		if (!validate_slab_ptr(slab)) {
3659 			slab_err(s, slab, "Not a valid slab page");
3660 			return NULL;
3661 		}
3662 	}
3663 #endif
3664 
3665 	object = slab->freelist;
3666 	slab->freelist = get_freepointer(s, object);
3667 	slab->inuse++;
3668 
3669 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
3670 		remove_partial(n, slab);
3671 		return NULL;
3672 	}
3673 
3674 	if (slab->inuse == slab->objects) {
3675 		remove_partial(n, slab);
3676 		add_full(s, n, slab);
3677 	}
3678 
3679 	return object;
3680 }
3681 
3682 /*
3683  * Called only for kmem_cache_debug() caches to allocate from a freshly
 * allocated slab. Allocate a single object instead of the whole freelist
 * and put the slab on the partial (or full) list.
3686  */
3687 static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
3688 					int orig_size, gfp_t gfpflags)
3689 {
3690 	bool allow_spin = gfpflags_allow_spinning(gfpflags);
3691 	int nid = slab_nid(slab);
3692 	struct kmem_cache_node *n = get_node(s, nid);
3693 	unsigned long flags;
3694 	void *object;
3695 
3696 	if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
3697 		/* Unlucky, discard newly allocated slab. */
3698 		free_new_slab_nolock(s, slab);
3699 		return NULL;
3700 	}
3701 
3702 	object = slab->freelist;
3703 	slab->freelist = get_freepointer(s, object);
3704 	slab->inuse = 1;
3705 
3706 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
3707 		/*
3708 		 * It's not really expected that this would fail on a
3709 		 * freshly allocated slab, but a concurrent memory
3710 		 * corruption in theory could cause that.
3711 		 * Leak memory of allocated slab.
3712 		 */
3713 		if (!allow_spin)
3714 			spin_unlock_irqrestore(&n->list_lock, flags);
3715 		return NULL;
3716 	}
3717 
3718 	if (allow_spin)
3719 		spin_lock_irqsave(&n->list_lock, flags);
3720 
3721 	if (slab->inuse == slab->objects)
3722 		add_full(s, n, slab);
3723 	else
3724 		add_partial(n, slab, ADD_TO_HEAD);
3725 
3726 	inc_slabs_node(s, nid, slab->objects);
3727 	spin_unlock_irqrestore(&n->list_lock, flags);
3728 
3729 	return object;
3730 }
3731 
3732 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
3733 
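/*
 * Try to grab slabs from the node's partial list for bulk allocation, moving
 * them to pc->slabs until their (racily counted) free objects add up to at
 * least pc->min_objects, while trying not to exceed pc->max_objects.
 * Returns true if any free objects were grabbed.
 */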
3734 static bool get_partial_node_bulk(struct kmem_cache *s,
3735 				  struct kmem_cache_node *n,
3736 				  struct partial_bulk_context *pc,
3737 				  bool allow_spin)
3738 {
3739 	struct slab *slab, *slab2;
3740 	unsigned int total_free = 0;
3741 	unsigned long flags;
3742 
3743 	/* Racy check to avoid taking the lock unnecessarily. */
3744 	if (!n || data_race(!n->nr_partial))
3745 		return false;
3746 
3747 	INIT_LIST_HEAD(&pc->slabs);
3748 
3749 	if (allow_spin)
3750 		spin_lock_irqsave(&n->list_lock, flags);
3751 	else if (!spin_trylock_irqsave(&n->list_lock, flags))
3752 		return false;
3753 
3754 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
3755 		struct freelist_counters flc;
3756 		unsigned int slab_free;
3757 
3758 		if (!pfmemalloc_match(slab, pc->flags))
3759 			continue;
3760 
3761 		/*
3762 		 * determine the number of free objects in the slab racily
3763 		 *
3764 		 * slab_free is a lower bound due to possible subsequent
3765 		 * concurrent freeing, so the caller may get more objects than
3766 		 * requested and must handle that
3767 		 */
3768 		flc.counters = data_race(READ_ONCE(slab->counters));
3769 		slab_free = flc.objects - flc.inuse;
3770 
		/* we already have the min and this slab would get us over the max */
3772 		if (total_free >= pc->min_objects
3773 		    && total_free + slab_free > pc->max_objects)
3774 			break;
3775 
3776 		remove_partial(n, slab);
3777 
3778 		list_add(&slab->slab_list, &pc->slabs);
3779 
3780 		total_free += slab_free;
3781 		if (total_free >= pc->max_objects)
3782 			break;
3783 	}
3784 
3785 	spin_unlock_irqrestore(&n->list_lock, flags);
3786 	return total_free > 0;
3787 }
3788 
3789 /*
3790  * Try to allocate object from a partial slab on a specific node.
3791  */
3792 static void *get_from_partial_node(struct kmem_cache *s,
3793 				   struct kmem_cache_node *n,
3794 				   struct partial_context *pc)
3795 {
3796 	struct slab *slab, *slab2;
3797 	unsigned long flags;
3798 	void *object = NULL;
3799 
3800 	/*
3801 	 * Racy check. If we mistakenly see no partial slabs then we
3802 	 * just allocate an empty slab. If we mistakenly try to get a
3803 	 * partial slab and there is none available then get_from_partial()
3804 	 * will return NULL.
3805 	 */
3806 	if (!n || !n->nr_partial)
3807 		return NULL;
3808 
3809 	if (gfpflags_allow_spinning(pc->flags))
3810 		spin_lock_irqsave(&n->list_lock, flags);
3811 	else if (!spin_trylock_irqsave(&n->list_lock, flags))
3812 		return NULL;
3813 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
3814 
3815 		struct freelist_counters old, new;
3816 
3817 		if (!pfmemalloc_match(slab, pc->flags))
3818 			continue;
3819 
3820 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
3821 			object = alloc_single_from_partial(s, n, slab,
3822 							pc->orig_size);
3823 			if (object)
3824 				break;
3825 			continue;
3826 		}
3827 
3828 		/*
		 * Get a single object from the slab. This might race with
		 * __slab_free(), but __slab_free() has to take the list_lock
		 * if it's about to make the slab fully free.
3832 		 */
3833 		do {
3834 			old.freelist = slab->freelist;
3835 			old.counters = slab->counters;
3836 
3837 			new.freelist = get_freepointer(s, old.freelist);
3838 			new.counters = old.counters;
3839 			new.inuse++;
3840 
3841 		} while (!__slab_update_freelist(s, slab, &old, &new, "get_from_partial_node"));
3842 
3843 		object = old.freelist;
3844 		if (!new.freelist)
3845 			remove_partial(n, slab);
3846 
3847 		break;
3848 	}
3849 	spin_unlock_irqrestore(&n->list_lock, flags);
3850 	return object;
3851 }
3852 
3853 /*
3854  * Get an object from somewhere. Search in increasing NUMA distances.
3855  */
3856 static void *get_from_any_partial(struct kmem_cache *s, struct partial_context *pc)
3857 {
3858 #ifdef CONFIG_NUMA
3859 	struct zonelist *zonelist;
3860 	struct zoneref *z;
3861 	struct zone *zone;
3862 	enum zone_type highest_zoneidx = gfp_zone(pc->flags);
3863 	unsigned int cpuset_mems_cookie;
3864 	bool allow_spin = gfpflags_allow_spinning(pc->flags);
3865 
3866 	/*
3867 	 * The defrag ratio allows a configuration of the tradeoffs between
3868 	 * inter node defragmentation and node local allocations. A lower
3869 	 * defrag_ratio increases the tendency to do local allocations
3870 	 * instead of attempting to obtain partial slabs from other nodes.
3871 	 *
3872 	 * If the defrag_ratio is set to 0 then kmalloc() always
3873 	 * returns node local objects. If the ratio is higher then kmalloc()
3874 	 * may return off node objects because partial slabs are obtained
3875 	 * from other nodes and filled up.
3876 	 *
3877 	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
3878 	 * (which makes defrag_ratio = 1000) then every (well almost)
3879 	 * allocation will first attempt to defrag slab caches on other nodes.
3880 	 * This means scanning over all nodes to look for partial slabs which
3881 	 * may be expensive if we do it every time we are trying to find a slab
3882 	 * with available objects.
3883 	 */
3884 	if (!s->remote_node_defrag_ratio ||
3885 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
3886 		return NULL;
3887 
3888 	do {
3889 		/*
3890 		 * read_mems_allowed_begin() accesses current->mems_allowed_seq,
3891 		 * a seqcount_spinlock_t that is not NMI-safe. Do not access
3892 		 * current->mems_allowed_seq and avoid retry when GFP flags
3893 		 * indicate spinning is not allowed.
3894 		 */
3895 		if (allow_spin)
3896 			cpuset_mems_cookie = read_mems_allowed_begin();
3897 
3898 		zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
3899 		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
3900 			struct kmem_cache_node *n;
3901 
3902 			n = get_node(s, zone_to_nid(zone));
3903 
3904 			if (n && cpuset_zone_allowed(zone, pc->flags) &&
3905 					n->nr_partial > s->min_partial) {
3906 
3907 				void *object = get_from_partial_node(s, n, pc);
3908 
3909 				if (object) {
3910 					/*
3911 					 * Don't check read_mems_allowed_retry()
3912 					 * here - if mems_allowed was updated in
3913 					 * parallel, that was a harmless race
3914 					 * between allocation and the cpuset
3915 					 * update
3916 					 */
3917 					return object;
3918 				}
3919 			}
3920 		}
3921 	} while (allow_spin && read_mems_allowed_retry(cpuset_mems_cookie));
3922 #endif	/* CONFIG_NUMA */
3923 	return NULL;
3924 }
3925 
3926 /*
3927  * Get an object from a partial slab
3928  */
3929 static void *get_from_partial(struct kmem_cache *s, int node,
3930 			      struct partial_context *pc)
3931 {
3932 	int searchnode = node;
3933 	void *object;
3934 
3935 	if (node == NUMA_NO_NODE)
3936 		searchnode = numa_mem_id();
3937 
3938 	object = get_from_partial_node(s, get_node(s, searchnode), pc);
3939 	if (object || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
3940 		return object;
3941 
3942 	return get_from_any_partial(s, pc);
3943 }
3944 
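/*
 * Does this cpu have any objects cached in its percpu sheaves (main, spare or
 * rcu_free) for cache @s, i.e. would flushing it do any work?
 */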
3945 static bool has_pcs_used(int cpu, struct kmem_cache *s)
3946 {
3947 	struct slub_percpu_sheaves *pcs;
3948 
3949 	if (!cache_has_sheaves(s))
3950 		return false;
3951 
3952 	pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3953 
3954 	return (pcs->spare || pcs->rcu_free || pcs->main->size);
3955 }
3956 
3957 /*
3958  * Flush percpu sheaves
3959  *
3960  * Called from CPU work handler with migration disabled.
3961  */
3962 static void flush_cpu_sheaves(struct work_struct *w)
3963 {
3964 	struct kmem_cache *s;
3965 	struct slub_flush_work *sfw;
3966 
3967 	sfw = container_of(w, struct slub_flush_work, work);
3968 
3969 	s = sfw->s;
3970 
3971 	if (cache_has_sheaves(s))
3972 		pcs_flush_all(s);
3973 }
3974 
3975 static void flush_all_cpus_locked(struct kmem_cache *s)
3976 {
3977 	struct slub_flush_work *sfw;
3978 	unsigned int cpu;
3979 
3980 	lockdep_assert_cpus_held();
3981 	mutex_lock(&flush_lock);
3982 
3983 	for_each_online_cpu(cpu) {
3984 		sfw = &per_cpu(slub_flush, cpu);
3985 		if (!has_pcs_used(cpu, s)) {
3986 			sfw->skip = true;
3987 			continue;
3988 		}
3989 		INIT_WORK(&sfw->work, flush_cpu_sheaves);
3990 		sfw->skip = false;
3991 		sfw->s = s;
3992 		queue_work_on(cpu, flushwq, &sfw->work);
3993 	}
3994 
3995 	for_each_online_cpu(cpu) {
3996 		sfw = &per_cpu(slub_flush, cpu);
3997 		if (sfw->skip)
3998 			continue;
3999 		flush_work(&sfw->work);
4000 	}
4001 
4002 	mutex_unlock(&flush_lock);
4003 }
4004 
4005 static void flush_all(struct kmem_cache *s)
4006 {
4007 	cpus_read_lock();
4008 	flush_all_cpus_locked(s);
4009 	cpus_read_unlock();
4010 }
4011 
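/*
 * Work handler that detaches this cpu's rcu_free sheaf and hands it to
 * call_rcu() so its objects are freed after a grace period.
 */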
4012 static void flush_rcu_sheaf(struct work_struct *w)
4013 {
4014 	struct slub_percpu_sheaves *pcs;
4015 	struct slab_sheaf *rcu_free;
4016 	struct slub_flush_work *sfw;
4017 	struct kmem_cache *s;
4018 
4019 	sfw = container_of(w, struct slub_flush_work, work);
4020 	s = sfw->s;
4021 
4022 	local_lock(&s->cpu_sheaves->lock);
4023 	pcs = this_cpu_ptr(s->cpu_sheaves);
4024 
4025 	rcu_free = pcs->rcu_free;
4026 	pcs->rcu_free = NULL;
4027 
4028 	local_unlock(&s->cpu_sheaves->lock);
4029 
4030 	if (rcu_free)
4031 		call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
}

/* needed for kvfree_rcu_barrier() */
4036 void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
4037 {
4038 	struct slub_flush_work *sfw;
4039 	unsigned int cpu;
4040 
4041 	mutex_lock(&flush_lock);
4042 
4043 	for_each_online_cpu(cpu) {
4044 		sfw = &per_cpu(slub_flush, cpu);
4045 
4046 		/*
		 * We don't check whether a rcu_free sheaf exists - a racing
		 * __kfree_rcu_sheaf() might have just removed it. By executing
		 * flush_rcu_sheaf() on the cpu we make sure such a
		 * __kfree_rcu_sheaf() has finished its call_rcu().
4051 		 */
4052 
4053 		INIT_WORK(&sfw->work, flush_rcu_sheaf);
4054 		sfw->s = s;
4055 		queue_work_on(cpu, flushwq, &sfw->work);
4056 	}
4057 
4058 	for_each_online_cpu(cpu) {
4059 		sfw = &per_cpu(slub_flush, cpu);
4060 		flush_work(&sfw->work);
4061 	}
4062 
4063 	mutex_unlock(&flush_lock);
4064 }
4065 
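/*
 * Flush the rcu_free sheaves of all caches that have sheaves and wait for the
 * queued call_rcu() callbacks via rcu_barrier().
 */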
4066 void flush_all_rcu_sheaves(void)
4067 {
4068 	struct kmem_cache *s;
4069 
4070 	cpus_read_lock();
4071 	mutex_lock(&slab_mutex);
4072 
4073 	list_for_each_entry(s, &slab_caches, list) {
4074 		if (!cache_has_sheaves(s))
4075 			continue;
4076 		flush_rcu_sheaves_on_cache(s);
4077 	}
4078 
4079 	mutex_unlock(&slab_mutex);
4080 	cpus_read_unlock();
4081 
4082 	rcu_barrier();
4083 }
4084 
4085 /*
 * Use the cpu hotplug notifier to ensure that the percpu sheaves are flushed
 * when necessary.
4088  */
4089 static int slub_cpu_dead(unsigned int cpu)
4090 {
4091 	struct kmem_cache *s;
4092 
4093 	mutex_lock(&slab_mutex);
4094 	list_for_each_entry(s, &slab_caches, list) {
4095 		if (cache_has_sheaves(s))
4096 			__pcs_flush_all_cpu(s, cpu);
4097 	}
4098 	mutex_unlock(&slab_mutex);
4099 	return 0;
4100 }
4101 
4102 #ifdef CONFIG_SLUB_DEBUG
4103 static int count_free(struct slab *slab)
4104 {
4105 	return slab->objects - slab->inuse;
4106 }
4107 
4108 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
4109 {
4110 	return atomic_long_read(&n->total_objects);
4111 }
4112 
4113 /* Supports checking bulk free of a constructed freelist */
4114 static inline bool free_debug_processing(struct kmem_cache *s,
4115 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
4116 	unsigned long addr, depot_stack_handle_t handle)
4117 {
4118 	bool checks_ok = false;
4119 	void *object = head;
4120 	int cnt = 0;
4121 
4122 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
4123 		if (!check_slab(s, slab))
4124 			goto out;
4125 	}
4126 
4127 	if (slab->inuse < *bulk_cnt) {
4128 		slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
4129 			 slab->inuse, *bulk_cnt);
4130 		goto out;
4131 	}
4132 
4133 next_object:
4134 
4135 	if (++cnt > *bulk_cnt)
4136 		goto out_cnt;
4137 
4138 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
4139 		if (!free_consistency_checks(s, slab, object, addr))
4140 			goto out;
4141 	}
4142 
4143 	if (s->flags & SLAB_STORE_USER)
4144 		set_track_update(s, object, TRACK_FREE, addr, handle);
4145 	trace(s, slab, object, 0);
4146 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
4147 	init_object(s, object, SLUB_RED_INACTIVE);
4148 
4149 	/* Reached end of constructed freelist yet? */
4150 	if (object != tail) {
4151 		object = get_freepointer(s, object);
4152 		goto next_object;
4153 	}
4154 	checks_ok = true;
4155 
4156 out_cnt:
4157 	if (cnt != *bulk_cnt) {
4158 		slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
4159 			 *bulk_cnt, cnt);
4160 		*bulk_cnt = cnt;
4161 	}
4162 
4163 out:
4164 
4165 	if (!checks_ok)
4166 		slab_fix(s, "Object at 0x%p not freed", object);
4167 
4168 	return checks_ok;
4169 }
4170 #endif /* CONFIG_SLUB_DEBUG */
4171 
4172 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
4173 static unsigned long count_partial(struct kmem_cache_node *n,
4174 					int (*get_count)(struct slab *))
4175 {
4176 	unsigned long flags;
4177 	unsigned long x = 0;
4178 	struct slab *slab;
4179 
4180 	spin_lock_irqsave(&n->list_lock, flags);
4181 	list_for_each_entry(slab, &n->partial, slab_list)
4182 		x += get_count(slab);
4183 	spin_unlock_irqrestore(&n->list_lock, flags);
4184 	return x;
4185 }
4186 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
4187 
4188 #ifdef CONFIG_SLUB_DEBUG
4189 #define MAX_PARTIAL_TO_SCAN 10000
4190 
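/*
 * Count free objects on the node's partial list, scanning at most
 * MAX_PARTIAL_TO_SCAN slabs and extrapolating for longer lists.
 */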
4191 static unsigned long count_partial_free_approx(struct kmem_cache_node *n)
4192 {
4193 	unsigned long flags;
4194 	unsigned long x = 0;
4195 	struct slab *slab;
4196 
4197 	spin_lock_irqsave(&n->list_lock, flags);
4198 	if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) {
4199 		list_for_each_entry(slab, &n->partial, slab_list)
4200 			x += slab->objects - slab->inuse;
4201 	} else {
4202 		/*
4203 		 * For a long list, approximate the total count of objects in
4204 		 * it to meet the limit on the number of slabs to scan.
4205 		 * Scan from both the list's head and tail for better accuracy.
4206 		 */
4207 		unsigned long scanned = 0;
4208 
4209 		list_for_each_entry(slab, &n->partial, slab_list) {
4210 			x += slab->objects - slab->inuse;
4211 			if (++scanned == MAX_PARTIAL_TO_SCAN / 2)
4212 				break;
4213 		}
4214 		list_for_each_entry_reverse(slab, &n->partial, slab_list) {
4215 			x += slab->objects - slab->inuse;
4216 			if (++scanned == MAX_PARTIAL_TO_SCAN)
4217 				break;
4218 		}
4219 		x = mult_frac(x, n->nr_partial, scanned);
4220 		x = min(x, node_nr_objs(n));
4221 	}
4222 	spin_unlock_irqrestore(&n->list_lock, flags);
4223 	return x;
4224 }
4225 
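/*
 * Print rate-limited diagnostics about the cache and its per-node slab usage
 * when an allocation fails.
 */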
4226 static noinline void
4227 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
4228 {
4229 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
4230 				      DEFAULT_RATELIMIT_BURST);
4231 	int cpu = raw_smp_processor_id();
4232 	int node;
4233 	struct kmem_cache_node *n;
4234 
4235 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
4236 		return;
4237 
4238 	pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n",
4239 		cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags);
4240 	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
4241 		s->name, s->object_size, s->size, oo_order(s->oo),
4242 		oo_order(s->min));
4243 
4244 	if (oo_order(s->min) > get_order(s->object_size))
4245 		pr_warn("  %s debugging increased min order, use slab_debug=O to disable.\n",
4246 			s->name);
4247 
4248 	for_each_kmem_cache_node(s, node, n) {
4249 		unsigned long nr_slabs;
4250 		unsigned long nr_objs;
4251 		unsigned long nr_free;
4252 
4253 		nr_free  = count_partial_free_approx(n);
4254 		nr_slabs = node_nr_slabs(n);
4255 		nr_objs  = node_nr_objs(n);
4256 
4257 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
4258 			node, nr_slabs, nr_objs, nr_free);
4259 	}
4260 }
4261 #else /* CONFIG_SLUB_DEBUG */
4262 static inline void
4263 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
4264 #endif
4265 
4266 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
4267 {
4268 	if (unlikely(slab_test_pfmemalloc(slab)))
4269 		return gfp_pfmemalloc_allowed(gfpflags);
4270 
4271 	return true;
4272 }
4273 
4274 /*
4275  * Get the slab's freelist and do not freeze it.
4276  *
 * Assumes the slab is isolated from the node partial list and not frozen.
4278  *
4279  * Assumes this is performed only for caches without debugging so we
4280  * don't need to worry about adding the slab to the full list.
4281  */
4282 static inline void *get_freelist_nofreeze(struct kmem_cache *s, struct slab *slab)
4283 {
4284 	struct freelist_counters old, new;
4285 
4286 	do {
4287 		old.freelist = slab->freelist;
4288 		old.counters = slab->counters;
4289 
4290 		new.freelist = NULL;
4291 		new.counters = old.counters;
4292 		VM_WARN_ON_ONCE(new.frozen);
4293 
4294 		new.inuse = old.objects;
4295 
4296 	} while (!slab_update_freelist(s, slab, &old, &new, "get_freelist_nofreeze"));
4297 
4298 	return old.freelist;
4299 }
4300 
4301 /*
4302  * If the object has been wiped upon free, make sure it's fully initialized by
4303  * zeroing out freelist pointer.
4304  *
4305  * Note that we also wipe custom freelist pointers.
4306  */
4307 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
4308 						   void *obj)
4309 {
4310 	if (unlikely(slab_want_init_on_free(s)) && obj &&
4311 	    !freeptr_outside_object(s))
4312 		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
4313 			0, sizeof(void *));
4314 }
4315 
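/*
 * Grab up to @count objects from the freelist of a freshly allocated slab and
 * store them in @p. If free objects remain, the slab is put on the node
 * partial list; with allow_spin false this may fail due to trylock, in which
 * case the new slab is discarded and 0 is returned. Returns the number of
 * objects taken.
 */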
4316 static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
4317 		void **p, unsigned int count, bool allow_spin)
4318 {
4319 	unsigned int allocated = 0;
4320 	struct kmem_cache_node *n;
4321 	bool needs_add_partial;
4322 	unsigned long flags;
4323 	void *object;
4324 
4325 	/*
4326 	 * Are we going to put the slab on the partial list?
4327 	 * Note slab->inuse is 0 on a new slab.
4328 	 */
4329 	needs_add_partial = (slab->objects > count);
4330 
4331 	if (!allow_spin && needs_add_partial) {
4332 
4333 		n = get_node(s, slab_nid(slab));
4334 
4335 		if (!spin_trylock_irqsave(&n->list_lock, flags)) {
4336 			/* Unlucky, discard newly allocated slab */
4337 			free_new_slab_nolock(s, slab);
4338 			return 0;
4339 		}
4340 	}
4341 
4342 	object = slab->freelist;
4343 	while (object && allocated < count) {
4344 		p[allocated] = object;
4345 		object = get_freepointer(s, object);
4346 		maybe_wipe_obj_freeptr(s, p[allocated]);
4347 
4348 		slab->inuse++;
4349 		allocated++;
4350 	}
4351 	slab->freelist = object;
4352 
4353 	if (needs_add_partial) {
4354 
4355 		if (allow_spin) {
4356 			n = get_node(s, slab_nid(slab));
4357 			spin_lock_irqsave(&n->list_lock, flags);
4358 		}
4359 		add_partial(n, slab, ADD_TO_HEAD);
4360 		spin_unlock_irqrestore(&n->list_lock, flags);
4361 	}
4362 
4363 	inc_slabs_node(s, slab_nid(slab), slab->objects);
4364 	return allocated;
4365 }
4366 
4367 /*
4368  * Slow path. We failed to allocate via percpu sheaves or they are not available
4369  * due to bootstrap or debugging enabled or SLUB_TINY.
4370  *
4371  * We try to allocate from partial slab lists and fall back to allocating a new
4372  * slab.
4373  */
4374 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
4375 			   unsigned long addr, unsigned int orig_size)
4376 {
4377 	bool allow_spin = gfpflags_allow_spinning(gfpflags);
4378 	void *object;
4379 	struct slab *slab;
4380 	struct partial_context pc;
4381 	bool try_thisnode = true;
4382 
4383 	stat(s, ALLOC_SLOWPATH);
4384 
4385 new_objects:
4386 
4387 	pc.flags = gfpflags;
4388 	/*
4389 	 * When a preferred node is indicated but no __GFP_THISNODE
4390 	 *
4391 	 * 1) try to get a partial slab from target node only by having
4392 	 *    __GFP_THISNODE in pc.flags for get_from_partial()
4393 	 * 2) if 1) failed, try to allocate a new slab from target node with
	 *    GFP_NOWAIT | __GFP_THISNODE opportunistically
	 * 3) if 2) failed, retry with original gfpflags which will allow
	 *    get_from_partial() to try partial lists of other nodes before
	 *    potentially allocating a new page from other nodes
4398 	 */
4399 	if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
4400 		     && try_thisnode)) {
4401 		if (unlikely(!allow_spin))
4402 			/* Do not upgrade gfp to NOWAIT from more restrictive mode */
4403 			pc.flags = gfpflags | __GFP_THISNODE;
4404 		else
4405 			pc.flags = GFP_NOWAIT | __GFP_THISNODE;
4406 	}
4407 
4408 	pc.orig_size = orig_size;
4409 	object = get_from_partial(s, node, &pc);
4410 	if (object)
4411 		goto success;
4412 
4413 	slab = new_slab(s, pc.flags, node);
4414 
4415 	if (unlikely(!slab)) {
4416 		if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
4417 		    && try_thisnode) {
4418 			try_thisnode = false;
4419 			goto new_objects;
4420 		}
4421 		slab_out_of_memory(s, gfpflags, node);
4422 		return NULL;
4423 	}
4424 
4425 	stat(s, ALLOC_SLAB);
4426 
4427 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4428 		object = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
4429 
4430 		if (likely(object))
4431 			goto success;
4432 	} else {
4433 		alloc_from_new_slab(s, slab, &object, 1, allow_spin);
4434 
4435 		/* we don't need to check SLAB_STORE_USER here */
4436 		if (likely(object))
4437 			return object;
4438 	}
4439 
4440 	if (allow_spin)
4441 		goto new_objects;
4442 
4443 	/* This could cause an endless loop. Fail instead. */
4444 	return NULL;
4445 
4446 success:
4447 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
4448 		set_track(s, object, TRACK_ALLOC, addr, gfpflags);
4449 
4450 	return object;
4451 }
4452 
4453 static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
4454 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4455 {
4456 	void *object;
4457 
4458 #ifdef CONFIG_NUMA
4459 	if (static_branch_unlikely(&strict_numa) &&
4460 			node == NUMA_NO_NODE) {
4461 
4462 		struct mempolicy *mpol = current->mempolicy;
4463 
4464 		if (mpol) {
4465 			/*
4466 			 * Special BIND rule support. If the local node
			 * is in the permitted set then do not redirect
4468 			 * to a particular node.
4469 			 * Otherwise we apply the memory policy to get
4470 			 * the node we need to allocate on.
4471 			 */
4472 			if (mpol->mode != MPOL_BIND ||
4473 					!node_isset(numa_mem_id(), mpol->nodes))
4474 				node = mempolicy_slab_node();
4475 		}
4476 	}
4477 #endif
4478 
4479 	object = ___slab_alloc(s, gfpflags, node, addr, orig_size);
4480 
4481 	return object;
4482 }
4483 
4484 static __fastpath_inline
4485 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
4486 {
4487 	flags &= gfp_allowed_mask;
4488 
4489 	might_alloc(flags);
4490 
4491 	if (unlikely(should_failslab(s, flags)))
4492 		return NULL;
4493 
4494 	return s;
4495 }
4496 
4497 static __fastpath_inline
4498 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
4499 			  gfp_t flags, size_t size, void **p, bool init,
4500 			  unsigned int orig_size)
4501 {
4502 	unsigned int zero_size = s->object_size;
4503 	bool kasan_init = init;
4504 	size_t i;
4505 	gfp_t init_flags = flags & gfp_allowed_mask;
4506 
4507 	/*
	 * For a kmalloc object, the allocated memory size (object_size) is likely
	 * larger than the requested size (orig_size). If the redzone check is
4510 	 * enabled for the extra space, don't zero it, as it will be redzoned
4511 	 * soon. The redzone operation for this extra space could be seen as a
4512 	 * replacement of current poisoning under certain debug option, and
4513 	 * won't break other sanity checks.
4514 	 */
4515 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
4516 	    (s->flags & SLAB_KMALLOC))
4517 		zero_size = orig_size;
4518 
4519 	/*
4520 	 * When slab_debug is enabled, avoid memory initialization integrated
4521 	 * into KASAN and instead zero out the memory via the memset below with
4522 	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
4523 	 * cause false-positive reports. This does not lead to a performance
4524 	 * penalty on production builds, as slab_debug is not intended to be
4525 	 * enabled there.
4526 	 */
4527 	if (__slub_debug_enabled())
4528 		kasan_init = false;
4529 
4530 	/*
4531 	 * As memory initialization might be integrated into KASAN,
4532 	 * kasan_slab_alloc and initialization memset must be
4533 	 * kept together to avoid discrepancies in behavior.
4534 	 *
4535 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
4536 	 */
4537 	for (i = 0; i < size; i++) {
4538 		p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
4539 		if (p[i] && init && (!kasan_init ||
4540 				     !kasan_has_integrated_init()))
4541 			memset(p[i], 0, zero_size);
4542 		if (gfpflags_allow_spinning(flags))
4543 			kmemleak_alloc_recursive(p[i], s->object_size, 1,
4544 						 s->flags, init_flags);
4545 		kmsan_slab_alloc(s, p[i], init_flags);
4546 		alloc_tagging_slab_alloc_hook(s, p[i], flags);
4547 	}
4548 
4549 	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
4550 }
4551 
4552 /*
4553  * Replace the empty main sheaf with a (at least partially) full sheaf.
4554  *
4555  * Must be called with the cpu_sheaves local lock locked. If successful, returns
4556  * the pcs pointer and the local lock locked (possibly on a different cpu than
4557  * initially called). If not successful, returns NULL and the local lock
4558  * unlocked.
4559  */
4560 static struct slub_percpu_sheaves *
4561 __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, gfp_t gfp)
4562 {
4563 	struct slab_sheaf *empty = NULL;
4564 	struct slab_sheaf *full;
4565 	struct node_barn *barn;
4566 	bool allow_spin;
4567 
4568 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
4569 
4570 	/* Bootstrap or debug cache, back off */
4571 	if (unlikely(!cache_has_sheaves(s))) {
4572 		local_unlock(&s->cpu_sheaves->lock);
4573 		return NULL;
4574 	}
4575 
4576 	if (pcs->spare && pcs->spare->size > 0) {
4577 		swap(pcs->main, pcs->spare);
4578 		return pcs;
4579 	}
4580 
4581 	barn = get_barn(s);
4582 	if (!barn) {
4583 		local_unlock(&s->cpu_sheaves->lock);
4584 		return NULL;
4585 	}
4586 
4587 	allow_spin = gfpflags_allow_spinning(gfp);
4588 
4589 	full = barn_replace_empty_sheaf(barn, pcs->main, allow_spin);
4590 
4591 	if (full) {
4592 		stat(s, BARN_GET);
4593 		pcs->main = full;
4594 		return pcs;
4595 	}
4596 
4597 	stat(s, BARN_GET_FAIL);
4598 
4599 	if (allow_spin) {
4600 		if (pcs->spare) {
4601 			empty = pcs->spare;
4602 			pcs->spare = NULL;
4603 		} else {
4604 			empty = barn_get_empty_sheaf(barn, true);
4605 		}
4606 	}
4607 
4608 	local_unlock(&s->cpu_sheaves->lock);
4609 	pcs = NULL;
4610 
4611 	if (!allow_spin)
4612 		return NULL;
4613 
4614 	if (empty) {
4615 		if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
4616 			full = empty;
4617 		} else {
4618 			/*
4619 			 * we must be very low on memory so don't bother
4620 			 * with the barn
4621 			 */
4622 			sheaf_flush_unused(s, empty);
4623 			free_empty_sheaf(s, empty);
4624 		}
4625 	} else {
4626 		full = alloc_full_sheaf(s, gfp);
4627 	}
4628 
4629 	if (!full)
4630 		return NULL;
4631 
4632 	if (!local_trylock(&s->cpu_sheaves->lock))
4633 		goto barn_put;
4634 	pcs = this_cpu_ptr(s->cpu_sheaves);
4635 
4636 	/*
	 * If we are returning an empty sheaf, we either got it from the
	 * barn or had to allocate one. If we are returning a full
	 * sheaf, it's due to racing or being migrated to a different
	 * cpu. Breaching the barn's sheaf limits should thus be rare
	 * enough, so just ignore them to simplify the recovery.
4642 	 */
4643 
4644 	if (pcs->main->size == 0) {
4645 		if (!pcs->spare)
4646 			pcs->spare = pcs->main;
4647 		else
4648 			barn_put_empty_sheaf(barn, pcs->main);
4649 		pcs->main = full;
4650 		return pcs;
4651 	}
4652 
4653 	if (!pcs->spare) {
4654 		pcs->spare = full;
4655 		return pcs;
4656 	}
4657 
4658 	if (pcs->spare->size == 0) {
4659 		barn_put_empty_sheaf(barn, pcs->spare);
4660 		pcs->spare = full;
4661 		return pcs;
4662 	}
4663 
4664 barn_put:
4665 	barn_put_full_sheaf(barn, full);
4666 	stat(s, BARN_PUT);
4667 
4668 	return pcs;
4669 }
4670 
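/*
 * Allocate a single object from the percpu sheaves (the sheaf fastpath).
 * Returns NULL if the sheaves are unavailable or empty, the local lock cannot
 * be taken, or the object does not come from the requested node - the caller
 * then falls back to the regular slowpath.
 */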
4671 static __fastpath_inline
4672 void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node)
4673 {
4674 	struct slub_percpu_sheaves *pcs;
4675 	bool node_requested;
4676 	void *object;
4677 
4678 #ifdef CONFIG_NUMA
4679 	if (static_branch_unlikely(&strict_numa) &&
4680 			 node == NUMA_NO_NODE) {
4681 
4682 		struct mempolicy *mpol = current->mempolicy;
4683 
4684 		if (mpol) {
4685 			/*
4686 			 * Special BIND rule support. If the local node
			 * is in the permitted set then do not redirect
4688 			 * to a particular node.
4689 			 * Otherwise we apply the memory policy to get
4690 			 * the node we need to allocate on.
4691 			 */
4692 			if (mpol->mode != MPOL_BIND ||
4693 					!node_isset(numa_mem_id(), mpol->nodes))
4694 
4695 				node = mempolicy_slab_node();
4696 		}
4697 	}
4698 #endif
4699 
4700 	node_requested = IS_ENABLED(CONFIG_NUMA) && node != NUMA_NO_NODE;
4701 
4702 	/*
4703 	 * We assume the percpu sheaves contain only local objects although it's
4704 	 * not completely guaranteed, so we verify later.
4705 	 */
4706 	if (unlikely(node_requested && node != numa_mem_id())) {
4707 		stat(s, ALLOC_NODE_MISMATCH);
4708 		return NULL;
4709 	}
4710 
4711 	if (!local_trylock(&s->cpu_sheaves->lock))
4712 		return NULL;
4713 
4714 	pcs = this_cpu_ptr(s->cpu_sheaves);
4715 
4716 	if (unlikely(pcs->main->size == 0)) {
4717 		pcs = __pcs_replace_empty_main(s, pcs, gfp);
4718 		if (unlikely(!pcs))
4719 			return NULL;
4720 	}
4721 
4722 	object = pcs->main->objects[pcs->main->size - 1];
4723 
4724 	if (unlikely(node_requested)) {
4725 		/*
4726 		 * Verify that the object was from the node we want. This could
4727 		 * be false because of cpu migration during an unlocked part of
4728 		 * the current allocation or previous freeing process.
4729 		 */
4730 		if (page_to_nid(virt_to_page(object)) != node) {
4731 			local_unlock(&s->cpu_sheaves->lock);
4732 			stat(s, ALLOC_NODE_MISMATCH);
4733 			return NULL;
4734 		}
4735 	}
4736 
4737 	pcs->main->size--;
4738 
4739 	local_unlock(&s->cpu_sheaves->lock);
4740 
4741 	stat(s, ALLOC_FASTPATH);
4742 
4743 	return object;
4744 }
4745 
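/*
 * Bulk-allocate up to @size objects from the percpu sheaves into @p.
 * Returns the number of objects allocated; the caller is expected to satisfy
 * the rest of the request from slab pages.
 */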
4746 static __fastpath_inline
4747 unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, gfp_t gfp, size_t size,
4748 				 void **p)
4749 {
4750 	struct slub_percpu_sheaves *pcs;
4751 	struct slab_sheaf *main;
4752 	unsigned int allocated = 0;
4753 	unsigned int batch;
4754 
4755 next_batch:
4756 	if (!local_trylock(&s->cpu_sheaves->lock))
4757 		return allocated;
4758 
4759 	pcs = this_cpu_ptr(s->cpu_sheaves);
4760 
4761 	if (unlikely(pcs->main->size == 0)) {
4762 
4763 		struct slab_sheaf *full;
4764 		struct node_barn *barn;
4765 
4766 		if (unlikely(!cache_has_sheaves(s))) {
4767 			local_unlock(&s->cpu_sheaves->lock);
4768 			return allocated;
4769 		}
4770 
4771 		if (pcs->spare && pcs->spare->size > 0) {
4772 			swap(pcs->main, pcs->spare);
4773 			goto do_alloc;
4774 		}
4775 
4776 		barn = get_barn(s);
4777 		if (!barn) {
4778 			local_unlock(&s->cpu_sheaves->lock);
4779 			return allocated;
4780 		}
4781 
4782 		full = barn_replace_empty_sheaf(barn, pcs->main,
4783 						gfpflags_allow_spinning(gfp));
4784 
4785 		if (full) {
4786 			stat(s, BARN_GET);
4787 			pcs->main = full;
4788 			goto do_alloc;
4789 		}
4790 
4791 		stat(s, BARN_GET_FAIL);
4792 
4793 		local_unlock(&s->cpu_sheaves->lock);
4794 
4795 		/*
		 * Once full sheaves in the barn are depleted, let the bulk
4797 		 * allocation continue from slab pages, otherwise we would just
4798 		 * be copying arrays of pointers twice.
4799 		 */
4800 		return allocated;
4801 	}
4802 
4803 do_alloc:
4804 
4805 	main = pcs->main;
4806 	batch = min(size, main->size);
4807 
4808 	main->size -= batch;
4809 	memcpy(p, main->objects + main->size, batch * sizeof(void *));
4810 
4811 	local_unlock(&s->cpu_sheaves->lock);
4812 
4813 	stat_add(s, ALLOC_FASTPATH, batch);
4814 
4815 	allocated += batch;
4816 
4817 	if (batch < size) {
4818 		p += batch;
4819 		size -= batch;
4820 		goto next_batch;
4821 	}
4822 
4823 	return allocated;
}

/*
4828  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
4829  * have the fastpath folded into their functions. So no function call
4830  * overhead for requests that can be satisfied on the fastpath.
4831  *
 * The fastpath works by first trying to pop an object from the percpu sheaves
 * via alloc_from_pcs(). If that fails (the sheaves are unavailable or empty),
 * __slab_alloc_node() is called for slow processing.
4836  */
4837 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
4838 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4839 {
4840 	void *object;
4841 	bool init = false;
4842 
4843 	s = slab_pre_alloc_hook(s, gfpflags);
4844 	if (unlikely(!s))
4845 		return NULL;
4846 
4847 	object = kfence_alloc(s, orig_size, gfpflags);
4848 	if (unlikely(object))
4849 		goto out;
4850 
4851 	object = alloc_from_pcs(s, gfpflags, node);
4852 
4853 	if (!object)
4854 		object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
4855 
4856 	maybe_wipe_obj_freeptr(s, object);
4857 	init = slab_want_init_on_alloc(gfpflags, s);
4858 
4859 out:
4860 	/*
	 * When init equals 'true', like for the kzalloc() family, only
	 * @orig_size bytes might be zeroed instead of s->object_size.
	 * In case this fails due to memcg_slab_post_alloc_hook(),
	 * object is set to NULL.
4865 	 */
4866 	slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size);
4867 
4868 	return object;
4869 }
4870 
4871 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
4872 {
4873 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
4874 				    s->object_size);
4875 
4876 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4877 
4878 	return ret;
4879 }
4880 EXPORT_SYMBOL(kmem_cache_alloc_noprof);
4881 
4882 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
4883 			   gfp_t gfpflags)
4884 {
4885 	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
4886 				    s->object_size);
4887 
4888 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4889 
4890 	return ret;
4891 }
4892 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
4893 
4894 bool kmem_cache_charge(void *objp, gfp_t gfpflags)
4895 {
4896 	if (!memcg_kmem_online())
4897 		return true;
4898 
4899 	return memcg_slab_post_charge(objp, gfpflags);
4900 }
4901 EXPORT_SYMBOL(kmem_cache_charge);
4902 
4903 /**
4904  * kmem_cache_alloc_node - Allocate an object on the specified node
4905  * @s: The cache to allocate from.
4906  * @gfpflags: See kmalloc().
4907  * @node: node number of the target node.
4908  *
4909  * Identical to kmem_cache_alloc but it will allocate memory on the given
4910  * node, which can improve the performance for cpu bound structures.
4911  *
4912  * Fallback to other node is possible if __GFP_THISNODE is not set.
4913  *
4914  * Return: pointer to the new object or %NULL in case of error
4915  */
4916 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
4917 {
4918 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
4919 
4920 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
4921 
4922 	return ret;
4923 }
4924 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
4925 
4926 static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
4927 				      struct slab_sheaf *sheaf, gfp_t gfp)
4928 {
4929 	gfp_t gfp_nomemalloc;
4930 	int ret;
4931 
4932 	gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
4933 	if (gfp_pfmemalloc_allowed(gfp))
4934 		gfp_nomemalloc |= __GFP_NOWARN;
4935 
4936 	ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
4937 
4938 	if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
4939 		return ret;
4940 
4941 	/*
	 * If we are allowed to, refill the sheaf using pfmemalloc reserves,
	 * but then remember it for when the sheaf is returned
4944 	 */
4945 	ret = refill_sheaf(s, sheaf, gfp);
4946 	sheaf->pfmemalloc = true;
4947 
4948 	return ret;
4949 }
4950 
4951 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
4952 				   size_t size, void **p);
4953 
4954 /*
 * Returns a sheaf that has at least the requested number of objects prefilled.
 * When prefilling is needed, it is done with the given gfp flags.
 *
 * Returns NULL if sheaf allocation or prefilling failed.
4959  */
4960 struct slab_sheaf *
4961 kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
4962 {
4963 	struct slub_percpu_sheaves *pcs;
4964 	struct slab_sheaf *sheaf = NULL;
4965 	struct node_barn *barn;
4966 
4967 	if (unlikely(!size))
4968 		return NULL;
4969 
4970 	if (unlikely(size > s->sheaf_capacity)) {
4971 
4972 		sheaf = kzalloc_flex(*sheaf, objects, size, gfp);
4973 		if (!sheaf)
4974 			return NULL;
4975 
4976 		stat(s, SHEAF_PREFILL_OVERSIZE);
4977 		sheaf->cache = s;
4978 		sheaf->capacity = size;
4979 
4980 		/*
		 * we do not need to care about pfmemalloc here because oversize
		 * sheaves are always flushed and freed when returned
4983 		 */
4984 		if (!__kmem_cache_alloc_bulk(s, gfp, size,
4985 					     &sheaf->objects[0])) {
4986 			kfree(sheaf);
4987 			return NULL;
4988 		}
4989 
4990 		sheaf->size = size;
4991 
4992 		return sheaf;
4993 	}
4994 
4995 	local_lock(&s->cpu_sheaves->lock);
4996 	pcs = this_cpu_ptr(s->cpu_sheaves);
4997 
4998 	if (pcs->spare) {
4999 		sheaf = pcs->spare;
5000 		pcs->spare = NULL;
5001 		stat(s, SHEAF_PREFILL_FAST);
5002 	} else {
5003 		barn = get_barn(s);
5004 
5005 		stat(s, SHEAF_PREFILL_SLOW);
5006 		if (barn)
5007 			sheaf = barn_get_full_or_empty_sheaf(barn);
5008 		if (sheaf && sheaf->size)
5009 			stat(s, BARN_GET);
5010 		else
5011 			stat(s, BARN_GET_FAIL);
5012 	}
5013 
	local_unlock(&s->cpu_sheaves->lock);

	if (!sheaf)
5018 		sheaf = alloc_empty_sheaf(s, gfp);
5019 
5020 	if (sheaf) {
5021 		sheaf->capacity = s->sheaf_capacity;
5022 		sheaf->pfmemalloc = false;
5023 
5024 		if (sheaf->size < size &&
5025 		    __prefill_sheaf_pfmemalloc(s, sheaf, gfp)) {
5026 			sheaf_flush_unused(s, sheaf);
5027 			free_empty_sheaf(s, sheaf);
5028 			sheaf = NULL;
5029 		}
5030 	}
5031 
5032 	return sheaf;
5033 }
5034 
5035 /*
5036  * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
5037  *
5038  * If the sheaf cannot simply become the percpu spare sheaf, but there's space
5039  * for a full sheaf in the barn, we try to refill the sheaf back to the cache's
5040  * sheaf_capacity to avoid handling partially full sheaves.
5041  *
5042  * If the refill fails because gfp is e.g. GFP_NOWAIT, or the barn is full, the
5043  * sheaf is instead flushed and freed.
5044  */
5045 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
5046 			     struct slab_sheaf *sheaf)
5047 {
5048 	struct slub_percpu_sheaves *pcs;
5049 	struct node_barn *barn;
5050 
5051 	if (unlikely((sheaf->capacity != s->sheaf_capacity)
5052 		     || sheaf->pfmemalloc)) {
5053 		sheaf_flush_unused(s, sheaf);
5054 		kfree(sheaf);
5055 		return;
5056 	}
5057 
5058 	local_lock(&s->cpu_sheaves->lock);
5059 	pcs = this_cpu_ptr(s->cpu_sheaves);
5060 	barn = get_barn(s);
5061 
5062 	if (!pcs->spare) {
5063 		pcs->spare = sheaf;
5064 		sheaf = NULL;
5065 		stat(s, SHEAF_RETURN_FAST);
5066 	}
5067 
5068 	local_unlock(&s->cpu_sheaves->lock);
5069 
5070 	if (!sheaf)
5071 		return;
5072 
5073 	stat(s, SHEAF_RETURN_SLOW);
5074 
5075 	/*
5076 	 * If the barn has too many full sheaves or we fail to refill the sheaf,
5077 	 * simply flush and free it.
5078 	 */
5079 	if (!barn || data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
5080 	    refill_sheaf(s, sheaf, gfp)) {
5081 		sheaf_flush_unused(s, sheaf);
5082 		free_empty_sheaf(s, sheaf);
5083 		return;
5084 	}
5085 
5086 	barn_put_full_sheaf(barn, sheaf);
5087 	stat(s, BARN_PUT);
5088 }
5089 
5090 /*
5091  * refill a sheaf previously returned by kmem_cache_prefill_sheaf to at least
5092  * the given size
5093  *
 * The sheaf might be replaced by a new one when requesting more than
 * s->sheaf_capacity objects. If such a replacement is necessary but the
 * refill fails (returning -ENOMEM), the existing sheaf is left intact.
 *
 * In practice we always refill to the full sheaf capacity.
5099  */
5100 int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
5101 			    struct slab_sheaf **sheafp, unsigned int size)
5102 {
5103 	struct slab_sheaf *sheaf;
5104 
5105 	/*
5106 	 * TODO: do we want to support *sheaf == NULL to be equivalent of
5107 	 * kmem_cache_prefill_sheaf() ?
5108 	 */
5109 	if (!sheafp || !(*sheafp))
5110 		return -EINVAL;
5111 
5112 	sheaf = *sheafp;
5113 	if (sheaf->size >= size)
5114 		return 0;
5115 
5116 	if (likely(sheaf->capacity >= size)) {
5117 		if (likely(sheaf->capacity == s->sheaf_capacity))
5118 			return __prefill_sheaf_pfmemalloc(s, sheaf, gfp);
5119 
5120 		if (!__kmem_cache_alloc_bulk(s, gfp, sheaf->capacity - sheaf->size,
5121 					     &sheaf->objects[sheaf->size])) {
5122 			return -ENOMEM;
5123 		}
5124 		sheaf->size = sheaf->capacity;
5125 
5126 		return 0;
5127 	}
5128 
5129 	/*
5130 	 * We had a regular sized sheaf and need an oversize one, or we had an
5131 	 * oversize one already but need a larger one now.
5132 	 * This should be a very rare path so let's not complicate it.
5133 	 */
5134 	sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
5135 	if (!sheaf)
5136 		return -ENOMEM;
5137 
5138 	kmem_cache_return_sheaf(s, gfp, *sheafp);
5139 	*sheafp = sheaf;
5140 	return 0;
5141 }
5142 
5143 /*
5144  * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
5145  *
 * Guaranteed not to fail for at least as many allocations as the requested
 * prefill size. After the sheaf is emptied, it fails - there is no fallback to
 * the slab cache itself.
 *
 * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT;
 * memcg charging is forced over the limit if necessary, to avoid failure.
5151  *
5152  * It is possible that the allocation comes from kfence and then the sheaf
5153  * size is not decreased.
5154  */
5155 void *
5156 kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
5157 				   struct slab_sheaf *sheaf)
5158 {
5159 	void *ret = NULL;
5160 	bool init;
5161 
5162 	if (sheaf->size == 0)
5163 		goto out;
5164 
5165 	ret = kfence_alloc(s, s->object_size, gfp);
5166 
5167 	if (likely(!ret))
5168 		ret = sheaf->objects[--sheaf->size];
5169 
5170 	init = slab_want_init_on_alloc(gfp, s);
5171 
5172 	/* add __GFP_NOFAIL to force successful memcg charging */
5173 	slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);
5174 out:
5175 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
5176 
5177 	return ret;
5178 }
5179 
5180 unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
5181 {
5182 	return sheaf->size;
5183 }
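
/*
 * Illustrative usage of the prefilled sheaf API above - a sketch for
 * documentation purposes only, not kernel code. It assumes a caller-owned
 * cache "my_cache" and the usual alloc_hooks() wrapper
 * kmem_cache_alloc_from_sheaf() around the _noprof variant:
 *
 *	struct slab_sheaf *sheaf;
 *	void *objs[3];
 *	int i;
 *
 *	sheaf = kmem_cache_prefill_sheaf(my_cache, GFP_KERNEL, 3);
 *	if (!sheaf)
 *		return -ENOMEM;
 *
 *	for (i = 0; i < 3; i++)
 *		objs[i] = kmem_cache_alloc_from_sheaf(my_cache, GFP_KERNEL,
 *						      sheaf);
 *
 *	kmem_cache_return_sheaf(my_cache, GFP_KERNEL, sheaf);
 *
 * The three allocations above cannot fail because the sheaf was prefilled
 * with at least three objects; once the sheaf is empty, further allocations
 * from it fail instead of falling back to the cache.
 */
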
5184 /*
5185  * To avoid unnecessary overhead, we pass through large allocation requests
5186  * directly to the page allocator. We use __GFP_COMP, because we will need to
5187  * know the allocation order to free the pages properly in kfree.
5188  */
5189 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
5190 {
5191 	struct page *page;
5192 	void *ptr = NULL;
5193 	unsigned int order = get_order(size);
5194 
5195 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
5196 		flags = kmalloc_fix_flags(flags);
5197 
5198 	flags |= __GFP_COMP;
5199 
5200 	if (node == NUMA_NO_NODE)
5201 		page = alloc_frozen_pages_noprof(flags, order);
5202 	else
5203 		page = __alloc_frozen_pages_noprof(flags, order, node, NULL);
5204 
5205 	if (page) {
5206 		ptr = page_address(page);
5207 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
5208 				      PAGE_SIZE << order);
5209 		__SetPageLargeKmalloc(page);
5210 	}
5211 
5212 	ptr = kasan_kmalloc_large(ptr, size, flags);
5213 	/* As ptr might get tagged, call kmemleak hook after KASAN. */
5214 	kmemleak_alloc(ptr, size, 1, flags);
5215 	kmsan_kmalloc_large(ptr, size, flags);
5216 
5217 	return ptr;
5218 }
5219 
5220 void *__kmalloc_large_noprof(size_t size, gfp_t flags)
5221 {
5222 	void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
5223 
5224 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
5225 		      flags, NUMA_NO_NODE);
5226 	return ret;
5227 }
5228 EXPORT_SYMBOL(__kmalloc_large_noprof);
5229 
5230 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
5231 {
5232 	void *ret = ___kmalloc_large_node(size, flags, node);
5233 
5234 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
5235 		      flags, node);
5236 	return ret;
5237 }
5238 EXPORT_SYMBOL(__kmalloc_large_node_noprof);
5239 
5240 static __always_inline
5241 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
5242 			unsigned long caller)
5243 {
5244 	struct kmem_cache *s;
5245 	void *ret;
5246 
5247 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
5248 		ret = __kmalloc_large_node_noprof(size, flags, node);
5249 		trace_kmalloc(caller, ret, size,
5250 			      PAGE_SIZE << get_order(size), flags, node);
5251 		return ret;
5252 	}
5253 
5254 	if (unlikely(!size))
5255 		return ZERO_SIZE_PTR;
5256 
5257 	s = kmalloc_slab(size, b, flags, caller);
5258 
5259 	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
5260 	ret = kasan_kmalloc(s, ret, size, flags);
5261 	trace_kmalloc(caller, ret, size, s->size, flags, node);
5262 	return ret;
}

void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
5265 {
5266 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
5267 }
5268 EXPORT_SYMBOL(__kmalloc_node_noprof);
5269 
5270 void *__kmalloc_noprof(size_t size, gfp_t flags)
5271 {
5272 	return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
5273 }
5274 EXPORT_SYMBOL(__kmalloc_noprof);
5275 
5276 /**
5277  * kmalloc_nolock - Allocate an object of given size from any context.
5278  * @size: size to allocate
5279  * @gfp_flags: GFP flags. Only __GFP_ACCOUNT, __GFP_ZERO, __GFP_NO_OBJ_EXT
5280  * allowed.
5281  * @node: node number of the target node.
5282  *
5283  * Return: pointer to the new object or NULL in case of error.
5284  * NULL does not mean EBUSY or EAGAIN. It means ENOMEM.
5285  * There is no reason to call it again and expect !NULL.
5286  */
5287 void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
5288 {
5289 	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_NOMEMALLOC | gfp_flags;
5290 	struct kmem_cache *s;
5291 	bool can_retry = true;
5292 	void *ret;
5293 
5294 	VM_WARN_ON_ONCE(gfp_flags & ~(__GFP_ACCOUNT | __GFP_ZERO |
5295 				      __GFP_NO_OBJ_EXT));
5296 
5297 	if (unlikely(!size))
5298 		return ZERO_SIZE_PTR;
5299 
5300 	/*
5301 	 * See the comment for the same check in
5302 	 * alloc_frozen_pages_nolock_noprof()
5303 	 */
5304 	if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
5305 		return NULL;
5306 
5307 retry:
5308 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
5309 		return NULL;
5310 	s = kmalloc_slab(size, NULL, alloc_gfp, _RET_IP_);
5311 
5312 	if (!(s->flags & __CMPXCHG_DOUBLE) && !kmem_cache_debug(s))
5313 		/*
5314 		 * kmalloc_nolock() is not supported on architectures that
5315 		 * don't implement cmpxchg16b and thus need slab_lock()
		 * which could be preempted by an NMI.
5317 		 * But debug caches don't use that and only rely on
5318 		 * kmem_cache_node->list_lock, so kmalloc_nolock() can attempt
5319 		 * to allocate from debug caches by
5320 		 * spin_trylock_irqsave(&n->list_lock, ...)
5321 		 */
5322 		return NULL;
5323 
5324 	ret = alloc_from_pcs(s, alloc_gfp, node);
5325 	if (ret)
5326 		goto success;
5327 
5328 	/*
5329 	 * Do not call slab_alloc_node(), since trylock mode isn't
5330 	 * compatible with slab_pre_alloc_hook/should_failslab and
5331 	 * kfence_alloc. Hence call __slab_alloc_node() (at most twice)
5332 	 * and slab_post_alloc_hook() directly.
5333 	 */
5334 	ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size);
5335 
5336 	/*
5337 	 * It's possible we failed due to trylock as we preempted someone with
5338 	 * the sheaves locked, and the list_lock is also held by another cpu.
5339 	 * But it should be rare that multiple kmalloc buckets would have
5340 	 * sheaves locked, so try a larger one.
5341 	 */
5342 	if (!ret && can_retry) {
5343 		/* pick the next kmalloc bucket */
5344 		size = s->object_size + 1;
5345 		/*
5346 		 * Another alternative is to
5347 		 * if (memcg) alloc_gfp &= ~__GFP_ACCOUNT;
5348 		 * else if (!memcg) alloc_gfp |= __GFP_ACCOUNT;
		 * to retry from a bucket of the same size.
5350 		 */
5351 		can_retry = false;
5352 		goto retry;
5353 	}
5354 
5355 success:
5356 	maybe_wipe_obj_freeptr(s, ret);
5357 	slab_post_alloc_hook(s, NULL, alloc_gfp, 1, &ret,
5358 			     slab_want_init_on_alloc(alloc_gfp, s), size);
5359 
5360 	ret = kasan_kmalloc(s, ret, size, alloc_gfp);
5361 	return ret;
5362 }
5363 EXPORT_SYMBOL_GPL(kmalloc_nolock_noprof);
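
/*
 * Illustrative usage sketch, not kernel code: from a context where neither
 * sleeping nor spinning is allowed (e.g. NMI or tracing code), a caller would
 * do something like the following (assuming the usual alloc_hooks() wrapper
 * kmalloc_nolock() around the _noprof variant):
 *
 *	void *p = kmalloc_nolock(size, __GFP_ZERO, NUMA_NO_NODE);
 *	if (!p)
 *		return;
 *
 * A NULL return means ENOMEM; retrying will not help. The object is freed
 * later, e.g. with kfree() from a context where that is allowed.
 */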
5364 
5365 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
5366 					 int node, unsigned long caller)
5367 {
	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
}
5371 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
5372 
5373 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
5374 {
5375 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
5376 					    _RET_IP_, size);
5377 
5378 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
5379 
5380 	ret = kasan_kmalloc(s, ret, size, gfpflags);
5381 	return ret;
5382 }
5383 EXPORT_SYMBOL(__kmalloc_cache_noprof);
5384 
5385 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
5386 				  int node, size_t size)
5387 {
5388 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
5389 
5390 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
5391 
5392 	ret = kasan_kmalloc(s, ret, size, gfpflags);
5393 	return ret;
5394 }
5395 EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
5396 
5397 static noinline void free_to_partial_list(
5398 	struct kmem_cache *s, struct slab *slab,
5399 	void *head, void *tail, int bulk_cnt,
5400 	unsigned long addr)
5401 {
5402 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
5403 	struct slab *slab_free = NULL;
5404 	int cnt = bulk_cnt;
5405 	unsigned long flags;
5406 	depot_stack_handle_t handle = 0;
5407 
5408 	/*
5409 	 * We cannot use GFP_NOWAIT as there are callsites where waking up
5410 	 * kswapd could deadlock
5411 	 */
5412 	if (s->flags & SLAB_STORE_USER)
5413 		handle = set_track_prepare(__GFP_NOWARN);
5414 
5415 	spin_lock_irqsave(&n->list_lock, flags);
5416 
5417 	if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
5418 		void *prior = slab->freelist;
5419 
5420 		/* Perform the actual freeing while we still hold the locks */
5421 		slab->inuse -= cnt;
5422 		set_freepointer(s, tail, prior);
5423 		slab->freelist = head;
5424 
5425 		/*
		 * If the slab is empty, and the node's partial list is full,
		 * it should be discarded anyway no matter whether it's on the
		 * full or the partial list.
5429 		 */
5430 		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
5431 			slab_free = slab;
5432 
5433 		if (!prior) {
5434 			/* was on full list */
5435 			remove_full(s, n, slab);
5436 			if (!slab_free) {
5437 				add_partial(n, slab, ADD_TO_TAIL);
5438 				stat(s, FREE_ADD_PARTIAL);
5439 			}
5440 		} else if (slab_free) {
5441 			remove_partial(n, slab);
5442 			stat(s, FREE_REMOVE_PARTIAL);
5443 		}
5444 	}
5445 
5446 	if (slab_free) {
5447 		/*
5448 		 * Update the counters while still holding n->list_lock to
5449 		 * prevent spurious validation warnings
5450 		 */
5451 		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
5452 	}
5453 
5454 	spin_unlock_irqrestore(&n->list_lock, flags);
5455 
5456 	if (slab_free) {
5457 		stat(s, FREE_SLAB);
5458 		free_slab(s, slab_free);
5459 	}
5460 }
5461 
5462 /*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the percpu sheaves in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just update the slab's
 * freelist and free the objects. If there is no additional partial slab
 * handling required then we can return immediately.
5469  */
5470 static void __slab_free(struct kmem_cache *s, struct slab *slab,
5471 			void *head, void *tail, int cnt,
5472 			unsigned long addr)
5473 
5474 {
5475 	bool was_full;
5476 	struct freelist_counters old, new;
5477 	struct kmem_cache_node *n = NULL;
5478 	unsigned long flags;
5479 	bool on_node_partial;
5480 
5481 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
5482 		free_to_partial_list(s, slab, head, tail, cnt, addr);
5483 		return;
5484 	}
5485 
5486 	do {
5487 		if (unlikely(n)) {
5488 			spin_unlock_irqrestore(&n->list_lock, flags);
5489 			n = NULL;
5490 		}
5491 
5492 		old.freelist = slab->freelist;
5493 		old.counters = slab->counters;
5494 
5495 		was_full = (old.freelist == NULL);
5496 
5497 		set_freepointer(s, tail, old.freelist);
5498 
5499 		new.freelist = head;
5500 		new.counters = old.counters;
5501 		new.inuse -= cnt;
5502 
5503 		/*
5504 		 * Might need to be taken off (due to becoming empty) or added
5505 		 * to (due to not being full anymore) the partial list.
5506 		 * Unless it's frozen.
5507 		 */
5508 		if (!new.inuse || was_full) {
5509 
5510 			n = get_node(s, slab_nid(slab));
5511 			/*
5512 			 * Speculatively acquire the list_lock.
5513 			 * If the cmpxchg does not succeed then we may
5514 			 * drop the list_lock without any processing.
5515 			 *
5516 			 * Otherwise the list_lock will synchronize with
5517 			 * other processors updating the list of slabs.
5518 			 */
5519 			spin_lock_irqsave(&n->list_lock, flags);
5520 
5521 			on_node_partial = slab_test_node_partial(slab);
5522 		}
5523 
5524 	} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
5525 
5526 	if (likely(!n)) {
5527 		/*
5528 		 * We didn't take the list_lock because the slab was already on
5529 		 * the partial list and will remain there.
5530 		 */
5531 		return;
5532 	}
5533 
5534 	/*
5535 	 * This slab was partially empty but not on the per-node partial list,
5536 	 * in which case we shouldn't manipulate its list, just return.
5537 	 */
5538 	if (!was_full && !on_node_partial) {
5539 		spin_unlock_irqrestore(&n->list_lock, flags);
5540 		return;
5541 	}
5542 
5543 	/*
5544 	 * If slab became empty, should we add/keep it on the partial list or we
5545 	 * have enough?
5546 	 */
5547 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
5548 		goto slab_empty;
5549 
5550 	/*
5551 	 * Objects left in the slab. If it was not on the partial list before
5552 	 * then add it.
5553 	 */
5554 	if (unlikely(was_full)) {
5555 		add_partial(n, slab, ADD_TO_TAIL);
5556 		stat(s, FREE_ADD_PARTIAL);
5557 	}
5558 	spin_unlock_irqrestore(&n->list_lock, flags);
5559 	return;
5560 
5561 slab_empty:
5562 	/*
5563 	 * The slab could have a single object and thus go from full to empty in
5564 	 * a single free, but more likely it was on the partial list. Remove it.
5565 	 */
5566 	if (likely(!was_full)) {
5567 		remove_partial(n, slab);
5568 		stat(s, FREE_REMOVE_PARTIAL);
5569 	}
5570 
5571 	spin_unlock_irqrestore(&n->list_lock, flags);
5572 	stat(s, FREE_SLAB);
5573 	discard_slab(s, slab);
5574 }
5575 
5576 /*
5577  * pcs is locked. We should have gotten rid of the spare sheaf and obtained an
5578  * empty sheaf, while the main sheaf is full. We want to install the empty sheaf
5579  * as a main sheaf, and make the current main sheaf a spare sheaf.
5580  *
5581  * However due to having relinquished the cpu_sheaves lock when obtaining
5582  * the empty sheaf, we need to handle some unlikely but possible cases.
5583  *
5584  * If we put any sheaf into the barn here, it's because we were interrupted or have
5585  * been migrated to a different cpu, which should be rare enough so just ignore
5586  * the barn's limits to simplify the handling.
5587  *
5588  * An alternative scenario that gets us here is when we fail
5589  * barn_replace_full_sheaf(), because there's no empty sheaf available in the
5590  * barn, so we had to allocate one with alloc_empty_sheaf(). But because we saw the
5591  * limit on full sheaves was not exceeded, we assume it didn't change and just
5592  * put the full sheaf there.
5593  */
5594 static void __pcs_install_empty_sheaf(struct kmem_cache *s,
5595 		struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty,
5596 		struct node_barn *barn)
5597 {
5598 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
5599 
5600 	/* This is what we expect to find if nobody interrupted us. */
5601 	if (likely(!pcs->spare)) {
5602 		pcs->spare = pcs->main;
5603 		pcs->main = empty;
5604 		return;
5605 	}
5606 
5607 	/*
5608 	 * Unlikely because if the main sheaf had space, we would have just
5609 	 * freed to it. Get rid of our empty sheaf.
5610 	 */
5611 	if (pcs->main->size < s->sheaf_capacity) {
5612 		barn_put_empty_sheaf(barn, empty);
5613 		return;
5614 	}
5615 
5616 	/* Also unlikely for the same reason */
5617 	if (pcs->spare->size < s->sheaf_capacity) {
5618 		swap(pcs->main, pcs->spare);
5619 		barn_put_empty_sheaf(barn, empty);
5620 		return;
5621 	}
5622 
5623 	/*
5624 	 * We probably failed barn_replace_full_sheaf() due to no empty sheaf
5625 	 * available there, but we allocated one, so finish the job.
5626 	 */
5627 	barn_put_full_sheaf(barn, pcs->main);
5628 	stat(s, BARN_PUT);
5629 	pcs->main = empty;
5630 }
5631 
5632 /*
5633  * Replace the full main sheaf with an (at least partially) empty sheaf.
5634  *
5635  * Must be called with the cpu_sheaves local lock locked. If successful, returns
5636  * the pcs pointer and the local lock locked (possibly on a different cpu than
5637  * initially called). If not successful, returns NULL and the local lock
5638  * unlocked.
5639  */
5640 static struct slub_percpu_sheaves *
5641 __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
5642 			bool allow_spin)
5643 {
5644 	struct slab_sheaf *empty;
5645 	struct node_barn *barn;
5646 	bool put_fail;
5647 
5648 restart:
5649 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
5650 
5651 	/* Bootstrap or debug cache, back off */
5652 	if (unlikely(!cache_has_sheaves(s))) {
5653 		local_unlock(&s->cpu_sheaves->lock);
5654 		return NULL;
5655 	}
5656 
5657 	barn = get_barn(s);
5658 	if (!barn) {
5659 		local_unlock(&s->cpu_sheaves->lock);
5660 		return NULL;
5661 	}
5662 
5663 	put_fail = false;
5664 
5665 	if (!pcs->spare) {
5666 		empty = barn_get_empty_sheaf(barn, allow_spin);
5667 		if (empty) {
5668 			pcs->spare = pcs->main;
5669 			pcs->main = empty;
5670 			return pcs;
5671 		}
5672 		goto alloc_empty;
5673 	}
5674 
5675 	if (pcs->spare->size < s->sheaf_capacity) {
5676 		swap(pcs->main, pcs->spare);
5677 		return pcs;
5678 	}
5679 
5680 	empty = barn_replace_full_sheaf(barn, pcs->main, allow_spin);
5681 
5682 	if (!IS_ERR(empty)) {
5683 		stat(s, BARN_PUT);
5684 		pcs->main = empty;
5685 		return pcs;
5686 	}
5687 
5688 	/* sheaf_flush_unused() doesn't support !allow_spin */
5689 	if (PTR_ERR(empty) == -E2BIG && allow_spin) {
5690 		/* Since we got here, spare exists and is full */
5691 		struct slab_sheaf *to_flush = pcs->spare;
5692 
5693 		stat(s, BARN_PUT_FAIL);
5694 
5695 		pcs->spare = NULL;
5696 		local_unlock(&s->cpu_sheaves->lock);
5697 
5698 		sheaf_flush_unused(s, to_flush);
5699 		empty = to_flush;
5700 		goto got_empty;
5701 	}
5702 
5703 	/*
5704 	 * We could not replace full sheaf because barn had no empty
5705 	 * sheaves. We can still allocate it and put the full sheaf in
5706 	 * __pcs_install_empty_sheaf(), but if we fail to allocate it,
5707 	 * make sure to count the fail.
5708 	 */
5709 	put_fail = true;
5710 
5711 alloc_empty:
5712 	local_unlock(&s->cpu_sheaves->lock);
5713 
5714 	/*
5715 	 * alloc_empty_sheaf() doesn't support !allow_spin and it's
5716 	 * easier to fall back to freeing directly without sheaves
5717 	 * than to add that support (here and in sheaf_flush_unused() above)
5718 	 */
5719 	if (!allow_spin)
5720 		return NULL;
5721 
5722 	empty = alloc_empty_sheaf(s, GFP_NOWAIT);
5723 	if (empty)
5724 		goto got_empty;
5725 
5726 	if (put_fail)
5727 		stat(s, BARN_PUT_FAIL);
5728 
5729 	if (!sheaf_try_flush_main(s))
5730 		return NULL;
5731 
5732 	if (!local_trylock(&s->cpu_sheaves->lock))
5733 		return NULL;
5734 
5735 	pcs = this_cpu_ptr(s->cpu_sheaves);
5736 
5737 	/*
5738 	 * we flushed the main sheaf so it should be empty now,
5739 	 * but in case we got preempted or migrated, we need to
5740 	 * check again
5741 	 */
5742 	if (pcs->main->size == s->sheaf_capacity)
5743 		goto restart;
5744 
5745 	return pcs;
5746 
5747 got_empty:
5748 	if (!local_trylock(&s->cpu_sheaves->lock)) {
5749 		barn_put_empty_sheaf(barn, empty);
5750 		return NULL;
5751 	}
5752 
5753 	pcs = this_cpu_ptr(s->cpu_sheaves);
5754 	__pcs_install_empty_sheaf(s, pcs, empty, barn);
5755 
5756 	return pcs;
5757 }
5758 
5759 /*
5760  * Free an object to the percpu sheaves.
5761  * The object is expected to have passed slab_free_hook() already.
5762  */
5763 static __fastpath_inline
5764 bool free_to_pcs(struct kmem_cache *s, void *object, bool allow_spin)
5765 {
5766 	struct slub_percpu_sheaves *pcs;
5767 
5768 	if (!local_trylock(&s->cpu_sheaves->lock))
5769 		return false;
5770 
5771 	pcs = this_cpu_ptr(s->cpu_sheaves);
5772 
5773 	if (unlikely(pcs->main->size == s->sheaf_capacity)) {
5774 
5775 		pcs = __pcs_replace_full_main(s, pcs, allow_spin);
5776 		if (unlikely(!pcs))
5777 			return false;
5778 	}
5779 
5780 	pcs->main->objects[pcs->main->size++] = object;
5781 
5782 	local_unlock(&s->cpu_sheaves->lock);
5783 
5784 	stat(s, FREE_FASTPATH);
5785 
5786 	return true;
5787 }
5788 
5789 static void rcu_free_sheaf(struct rcu_head *head)
5790 {
5791 	struct kmem_cache_node *n;
5792 	struct slab_sheaf *sheaf;
5793 	struct node_barn *barn = NULL;
5794 	struct kmem_cache *s;
5795 
5796 	sheaf = container_of(head, struct slab_sheaf, rcu_head);
5797 
5798 	s = sheaf->cache;
5799 
5800 	/*
5801 	 * This may remove some objects due to slab_free_hook() returning false,
5802 	 * so the sheaf might no longer be completely full. But it's easier to
5803 	 * handle it as full (unless it became completely empty), as the code
5804 	 * handles that fine. The only downside is that the sheaf will serve fewer
5805 	 * allocations when reused. This only happens with debugging enabled,
5806 	 * which is a performance hit anyway.
5807 	 *
5808 	 * If it returns true, there was at least one object from pfmemalloc
5809 	 * slab so simply flush everything.
5810 	 */
5811 	if (__rcu_free_sheaf_prepare(s, sheaf))
5812 		goto flush;
5813 
5814 	n = get_node(s, sheaf->node);
5815 	if (!n)
5816 		goto flush;
5817 
5818 	barn = n->barn;
5819 
5820 	/* due to slab_free_hook() */
5821 	if (unlikely(sheaf->size == 0))
5822 		goto empty;
5823 
5824 	/*
5825 	 * Checking nr_full/nr_empty outside lock avoids contention in case the
5826 	 * barn is at the respective limit. Due to the race we might go over the
5827 	 * limit but that should be rare and harmless.
5828 	 */
5829 
5830 	if (data_race(barn->nr_full) < MAX_FULL_SHEAVES) {
5831 		stat(s, BARN_PUT);
5832 		barn_put_full_sheaf(barn, sheaf);
5833 		return;
5834 	}
5835 
5836 flush:
5837 	stat(s, BARN_PUT_FAIL);
5838 	sheaf_flush_unused(s, sheaf);
5839 
5840 empty:
5841 	if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
5842 		barn_put_empty_sheaf(barn, sheaf);
5843 		return;
5844 	}
5845 
5846 	free_empty_sheaf(s, sheaf);
5847 }
5848 
5849 /*
5850  * kvfree_call_rcu() can be called while holding a raw_spinlock_t. Since
5851  * __kfree_rcu_sheaf() may acquire a spinlock_t (sleeping lock on PREEMPT_RT),
5852  * this would violate lock nesting rules. Therefore, kvfree_call_rcu() avoids
5853  * this problem by bypassing the sheaves layer entirely on PREEMPT_RT.
5854  *
5855  * However, lockdep still complains that it is invalid to acquire spinlock_t
5856  * while holding raw_spinlock_t, even on !PREEMPT_RT where spinlock_t is a
5857  * spinning lock. Tell lockdep that acquiring spinlock_t is valid here
5858  * by temporarily raising the wait-type to LD_WAIT_CONFIG.
5859  */
5860 static DEFINE_WAIT_OVERRIDE_MAP(kfree_rcu_sheaf_map, LD_WAIT_CONFIG);
5861 
5862 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
5863 {
5864 	struct slub_percpu_sheaves *pcs;
5865 	struct slab_sheaf *rcu_sheaf;
5866 
5867 	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
5868 		return false;
5869 
5870 	lock_map_acquire_try(&kfree_rcu_sheaf_map);
5871 
5872 	if (!local_trylock(&s->cpu_sheaves->lock))
5873 		goto fail;
5874 
5875 	pcs = this_cpu_ptr(s->cpu_sheaves);
5876 
5877 	if (unlikely(!pcs->rcu_free)) {
5878 
5879 		struct slab_sheaf *empty;
5880 		struct node_barn *barn;
5881 
5882 		/* Bootstrap or debug cache, fall back */
5883 		if (unlikely(!cache_has_sheaves(s))) {
5884 			local_unlock(&s->cpu_sheaves->lock);
5885 			goto fail;
5886 		}
5887 
5888 		if (pcs->spare && pcs->spare->size == 0) {
5889 			pcs->rcu_free = pcs->spare;
5890 			pcs->spare = NULL;
5891 			goto do_free;
5892 		}
5893 
5894 		barn = get_barn(s);
5895 		if (!barn) {
5896 			local_unlock(&s->cpu_sheaves->lock);
5897 			goto fail;
5898 		}
5899 
5900 		empty = barn_get_empty_sheaf(barn, true);
5901 
5902 		if (empty) {
5903 			pcs->rcu_free = empty;
5904 			goto do_free;
5905 		}
5906 
5907 		local_unlock(&s->cpu_sheaves->lock);
5908 
5909 		empty = alloc_empty_sheaf(s, GFP_NOWAIT);
5910 
5911 		if (!empty)
5912 			goto fail;
5913 
5914 		if (!local_trylock(&s->cpu_sheaves->lock)) {
5915 			barn_put_empty_sheaf(barn, empty);
5916 			goto fail;
5917 		}
5918 
5919 		pcs = this_cpu_ptr(s->cpu_sheaves);
5920 
5921 		if (unlikely(pcs->rcu_free))
5922 			barn_put_empty_sheaf(barn, empty);
5923 		else
5924 			pcs->rcu_free = empty;
5925 	}
5926 
5927 do_free:
5928 
5929 	rcu_sheaf = pcs->rcu_free;
5930 
5931 	/*
5932 	 * Since we flush immediately when size reaches capacity, we never reach
5933 	 * this with size already at capacity, so no OOB write is possible.
5934 	 */
5935 	rcu_sheaf->objects[rcu_sheaf->size++] = obj;
5936 
5937 	if (likely(rcu_sheaf->size < s->sheaf_capacity)) {
5938 		rcu_sheaf = NULL;
5939 	} else {
5940 		pcs->rcu_free = NULL;
5941 		rcu_sheaf->node = numa_mem_id();
5942 	}
5943 
5944 	/*
5945 	 * We submit the full sheaf to call_rcu() before local_unlock to make
5946 	 * sure a racing flush_all_rcu_sheaves() doesn't miss this sheaf.
5947 	 */
5948 	if (rcu_sheaf)
5949 		call_rcu(&rcu_sheaf->rcu_head, rcu_free_sheaf);
5950 
5951 	local_unlock(&s->cpu_sheaves->lock);
5952 
5953 	stat(s, FREE_RCU_SHEAF);
5954 	lock_map_release(&kfree_rcu_sheaf_map);
5955 	return true;
5956 
5957 fail:
5958 	stat(s, FREE_RCU_SHEAF_FAIL);
5959 	lock_map_release(&kfree_rcu_sheaf_map);
5960 	return false;
5961 }
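
/*
 * Illustrative usage sketch (not part of this file): objects freed with
 * kfree_rcu() on a sheaf-enabled cache may be batched through the per-cpu
 * rcu_free sheaf above and flushed by rcu_free_sheaf() after a grace period
 * (on PREEMPT_RT the sheaves layer is bypassed, as documented below). The
 * struct and field names are hypothetical; kfree_rcu() and struct rcu_head
 * are the real APIs.
 *
 *	struct foo {
 *		int payload;
 *		struct rcu_head rcu;
 *	};
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	...
 *	kfree_rcu(f, rcu);
 */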
5962 
5963 /*
5964  * Bulk free objects to the percpu sheaves.
5965  * Unlike free_to_pcs() this includes the calls to all necessary hooks
5966  * and the fallback to freeing to slab pages.
5967  */
5968 static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
5969 {
5970 	struct slub_percpu_sheaves *pcs;
5971 	struct slab_sheaf *main, *empty;
5972 	bool init = slab_want_init_on_free(s);
5973 	unsigned int batch, i = 0;
5974 	struct node_barn *barn;
5975 	void *remote_objects[PCS_BATCH_MAX];
5976 	unsigned int remote_nr = 0;
5977 	int node = numa_mem_id();
5978 
5979 next_remote_batch:
5980 	while (i < size) {
5981 		struct slab *slab = virt_to_slab(p[i]);
5982 
5983 		memcg_slab_free_hook(s, slab, p + i, 1);
5984 		alloc_tagging_slab_free_hook(s, slab, p + i, 1);
5985 
5986 		if (unlikely(!slab_free_hook(s, p[i], init, false))) {
5987 			p[i] = p[--size];
5988 			continue;
5989 		}
5990 
5991 		if (unlikely((IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)
5992 			     || slab_test_pfmemalloc(slab))) {
5993 			remote_objects[remote_nr] = p[i];
5994 			p[i] = p[--size];
5995 			if (++remote_nr >= PCS_BATCH_MAX)
5996 				goto flush_remote;
5997 			continue;
5998 		}
5999 
6000 		i++;
6001 	}
6002 
6003 	if (!size)
6004 		goto flush_remote;
6005 
6006 next_batch:
6007 	if (!local_trylock(&s->cpu_sheaves->lock))
6008 		goto fallback;
6009 
6010 	pcs = this_cpu_ptr(s->cpu_sheaves);
6011 
6012 	if (likely(pcs->main->size < s->sheaf_capacity))
6013 		goto do_free;
6014 
6015 	barn = get_barn(s);
6016 	if (!barn)
6017 		goto no_empty;
6018 
6019 	if (!pcs->spare) {
6020 		empty = barn_get_empty_sheaf(barn, true);
6021 		if (!empty)
6022 			goto no_empty;
6023 
6024 		pcs->spare = pcs->main;
6025 		pcs->main = empty;
6026 		goto do_free;
6027 	}
6028 
6029 	if (pcs->spare->size < s->sheaf_capacity) {
6030 		swap(pcs->main, pcs->spare);
6031 		goto do_free;
6032 	}
6033 
6034 	empty = barn_replace_full_sheaf(barn, pcs->main, true);
6035 	if (IS_ERR(empty)) {
6036 		stat(s, BARN_PUT_FAIL);
6037 		goto no_empty;
6038 	}
6039 
6040 	stat(s, BARN_PUT);
6041 	pcs->main = empty;
6042 
6043 do_free:
6044 	main = pcs->main;
6045 	batch = min(size, s->sheaf_capacity - main->size);
6046 
6047 	memcpy(main->objects + main->size, p, batch * sizeof(void *));
6048 	main->size += batch;
6049 
6050 	local_unlock(&s->cpu_sheaves->lock);
6051 
6052 	stat_add(s, FREE_FASTPATH, batch);
6053 
6054 	if (batch < size) {
6055 		p += batch;
6056 		size -= batch;
6057 		goto next_batch;
6058 	}
6059 
6060 	if (remote_nr)
6061 		goto flush_remote;
6062 
6063 	return;
6064 
6065 no_empty:
6066 	local_unlock(&s->cpu_sheaves->lock);
6067 
6068 	/*
6069 	 * if we depleted all empty sheaves in the barn or there are too
6070 	 * many full sheaves, free the rest to slab pages
6071 	 */
6072 fallback:
6073 	__kmem_cache_free_bulk(s, size, p);
6074 	stat_add(s, FREE_SLOWPATH, size);
6075 
6076 flush_remote:
6077 	if (remote_nr) {
6078 		__kmem_cache_free_bulk(s, remote_nr, &remote_objects[0]);
6079 		stat_add(s, FREE_SLOWPATH, remote_nr);
6080 		if (i < size) {
6081 			remote_nr = 0;
6082 			goto next_remote_batch;
6083 		}
6084 	}
6085 }
6086 
6087 struct defer_free {
6088 	struct llist_head objects;
6089 	struct irq_work work;
6090 };
6091 
6092 static void free_deferred_objects(struct irq_work *work);
6093 
6094 static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = {
6095 	.objects = LLIST_HEAD_INIT(objects),
6096 	.work = IRQ_WORK_INIT(free_deferred_objects),
6097 };
6098 
6099 /*
6100  * In PREEMPT_RT, irq_work runs in a per-cpu kthread, so it's safe
6101  * to take sleeping spin_locks from __slab_free().
6102  * In !PREEMPT_RT irq_work will run after local_unlock_irqrestore().
6103  */
6104 static void free_deferred_objects(struct irq_work *work)
6105 {
6106 	struct defer_free *df = container_of(work, struct defer_free, work);
6107 	struct llist_head *objs = &df->objects;
6108 	struct llist_node *llnode, *pos, *t;
6109 
6110 	if (llist_empty(objs))
6111 		return;
6112 
6113 	llnode = llist_del_all(objs);
6114 	llist_for_each_safe(pos, t, llnode) {
6115 		struct kmem_cache *s;
6116 		struct slab *slab;
6117 		void *x = pos;
6118 
6119 		slab = virt_to_slab(x);
6120 		s = slab->slab_cache;
6121 
6122 		/* Point 'x' back to the beginning of allocated object */
6123 		x -= s->offset;
6124 
6125 		/*
6126 		 * We used freepointer in 'x' to link 'x' into df->objects.
6127 		 * Clear it to NULL to avoid false positive detection
6128 		 * of "Freepointer corruption".
6129 		 */
6130 		set_freepointer(s, x, NULL);
6131 
6132 		__slab_free(s, slab, x, x, 1, _THIS_IP_);
6133 		stat(s, FREE_SLOWPATH);
6134 	}
6135 }
6136 
6137 static void defer_free(struct kmem_cache *s, void *head)
6138 {
6139 	struct defer_free *df;
6140 
6141 	guard(preempt)();
6142 
6143 	head = kasan_reset_tag(head);
6144 
6145 	df = this_cpu_ptr(&defer_free_objects);
6146 	if (llist_add(head + s->offset, &df->objects))
6147 		irq_work_queue(&df->work);
6148 }
6149 
6150 void defer_free_barrier(void)
6151 {
6152 	int cpu;
6153 
6154 	for_each_possible_cpu(cpu)
6155 		irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
6156 }
6157 
6158 static __fastpath_inline
6159 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
6160 	       unsigned long addr)
6161 {
6162 	memcg_slab_free_hook(s, slab, &object, 1);
6163 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
6164 
6165 	if (unlikely(!slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6166 		return;
6167 
6168 	if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())
6169 	    && likely(!slab_test_pfmemalloc(slab))) {
6170 		if (likely(free_to_pcs(s, object, true)))
6171 			return;
6172 	}
6173 
6174 	__slab_free(s, slab, object, object, 1, addr);
6175 	stat(s, FREE_SLOWPATH);
6176 }
6177 
6178 #ifdef CONFIG_MEMCG
6179 /* Do not inline the rare memcg charging failed path into the allocation path */
6180 static noinline
6181 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
6182 {
6183 	struct slab *slab = virt_to_slab(object);
6184 
6185 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
6186 
6187 	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6188 		__slab_free(s, slab, object, object, 1, _RET_IP_);
6189 }
6190 #endif
6191 
6192 static __fastpath_inline
6193 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
6194 		    void *tail, void **p, int cnt, unsigned long addr)
6195 {
6196 	memcg_slab_free_hook(s, slab, p, cnt);
6197 	alloc_tagging_slab_free_hook(s, slab, p, cnt);
6198 	/*
6199 	 * With KASAN enabled, slab_free_freelist_hook() modifies the freelist
6200 	 * to remove objects whose reuse must be delayed.
6201 	 */
6202 	if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) {
6203 		__slab_free(s, slab, head, tail, cnt, addr);
6204 		stat_add(s, FREE_SLOWPATH, cnt);
6205 	}
6206 }
6207 
6208 #ifdef CONFIG_SLUB_RCU_DEBUG
6209 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
6210 {
6211 	struct rcu_delayed_free *delayed_free =
6212 			container_of(rcu_head, struct rcu_delayed_free, head);
6213 	void *object = delayed_free->object;
6214 	struct slab *slab = virt_to_slab(object);
6215 	struct kmem_cache *s;
6216 
6217 	kfree(delayed_free);
6218 
6219 	if (WARN_ON(is_kfence_address(object)))
6220 		return;
6221 
6222 	/* find the object and the cache again */
6223 	if (WARN_ON(!slab))
6224 		return;
6225 	s = slab->slab_cache;
6226 	if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
6227 		return;
6228 
6229 	/* resume freeing */
6230 	if (slab_free_hook(s, object, slab_want_init_on_free(s), true)) {
6231 		__slab_free(s, slab, object, object, 1, _THIS_IP_);
6232 		stat(s, FREE_SLOWPATH);
6233 	}
6234 }
6235 #endif /* CONFIG_SLUB_RCU_DEBUG */
6236 
6237 #ifdef CONFIG_KASAN_GENERIC
6238 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
6239 {
6240 	__slab_free(cache, virt_to_slab(x), x, x, 1, addr);
6241 	stat(cache, FREE_SLOWPATH);
6242 }
6243 #endif
6244 
6245 static noinline void warn_free_bad_obj(struct kmem_cache *s, void *obj)
6246 {
6247 	struct kmem_cache *cachep;
6248 	struct slab *slab;
6249 
6250 	slab = virt_to_slab(obj);
6251 	if (WARN_ONCE(!slab,
6252 			"kmem_cache_free(%s, %p): object is not in a slab page\n",
6253 			s->name, obj))
6254 		return;
6255 
6256 	cachep = slab->slab_cache;
6257 
6258 	if (WARN_ONCE(cachep != s,
6259 			"kmem_cache_free(%s, %p): object belongs to different cache %s\n",
6260 			s->name, obj, cachep ? cachep->name : "(NULL)")) {
6261 		if (cachep)
6262 			print_tracking(cachep, obj);
6263 		return;
6264 	}
6265 }
6266 
6267 /**
6268  * kmem_cache_free - Deallocate an object
6269  * @s: The cache the allocation was from.
6270  * @x: The previously allocated object.
6271  *
6272  * Free an object which was previously allocated from this
6273  * cache.
6274  */
6275 void kmem_cache_free(struct kmem_cache *s, void *x)
6276 {
6277 	struct slab *slab;
6278 
6279 	slab = virt_to_slab(x);
6280 
6281 	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) ||
6282 	    kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
6283 
6284 		/*
6285 		 * Intentionally leak the object in these cases, because it
6286 		 * would be too dangerous to continue.
6287 		 */
6288 		if (unlikely(!slab || (slab->slab_cache != s))) {
6289 			warn_free_bad_obj(s, x);
6290 			return;
6291 		}
6292 	}
6293 
6294 	trace_kmem_cache_free(_RET_IP_, x, s);
6295 	slab_free(s, slab, x, _RET_IP_);
6296 }
6297 EXPORT_SYMBOL(kmem_cache_free);
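
/*
 * Illustrative lifecycle sketch (not from this file): a typical cache user
 * pairs kmem_cache_alloc() with kmem_cache_free() on the same cache. The
 * cache name and struct below are hypothetical.
 *
 *	struct widget { int a, b; };
 *	struct kmem_cache *widget_cache;
 *
 *	widget_cache = kmem_cache_create("widget", sizeof(struct widget),
 *					 0, SLAB_HWCACHE_ALIGN, NULL);
 *
 *	struct widget *w = kmem_cache_alloc(widget_cache, GFP_KERNEL);
 *	if (w) {
 *		...
 *		kmem_cache_free(widget_cache, w);
 *	}
 *
 *	kmem_cache_destroy(widget_cache);
 */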
6298 
6299 static inline size_t slab_ksize(struct slab *slab)
6300 {
6301 	struct kmem_cache *s = slab->slab_cache;
6302 
6303 #ifdef CONFIG_SLUB_DEBUG
6304 	/*
6305 	 * Debugging requires use of the padding between object
6306 	 * and whatever may come after it.
6307 	 */
6308 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
6309 		return s->object_size;
6310 #endif
6311 	if (s->flags & SLAB_KASAN)
6312 		return s->object_size;
6313 	/*
6314 	 * If we have the need to store the freelist pointer
6315 	 * or any other metadata back there then we can
6316 	 * only use the space before that information.
6317 	 */
6318 	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
6319 		return s->inuse;
6320 	else if (obj_exts_in_object(s, slab))
6321 		return s->inuse;
6322 	/*
6323 	 * Else we can use all the padding etc for the allocation
6324 	 */
6325 	return s->size;
6326 }
6327 
6328 static size_t __ksize(const void *object)
6329 {
6330 	struct page *page;
6331 	struct slab *slab;
6332 
6333 	if (unlikely(object == ZERO_SIZE_PTR))
6334 		return 0;
6335 
6336 	page = virt_to_page(object);
6337 
6338 	if (unlikely(PageLargeKmalloc(page)))
6339 		return large_kmalloc_size(page);
6340 
6341 	slab = page_slab(page);
6342 	/* Delete this after we're sure there are no users */
6343 	if (WARN_ON(!slab))
6344 		return page_size(page);
6345 
6346 #ifdef CONFIG_SLUB_DEBUG
6347 	skip_orig_size_check(slab->slab_cache, object);
6348 #endif
6349 
6350 	return slab_ksize(slab);
6351 }
6352 
6353 /**
6354  * ksize -- Report full size of underlying allocation
6355  * @objp: pointer to the object
6356  *
6357  * This should only be used internally to query the true size of allocations.
6358  * It is not meant to be a way to discover the usable size of an allocation
6359  * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
6360  * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
6361  * and/or FORTIFY_SOURCE.
6362  *
6363  * Return: size of the actual memory used by @objp in bytes
6364  */
6365 size_t ksize(const void *objp)
6366 {
6367 	/*
6368 	 * We need to first check that the pointer to the object is valid.
6369 	 * The KASAN report printed from ksize() is more useful than when
6370 	 * it's printed later, when the behaviour could be undefined due to
6371 	 * a potential use-after-free or double-free.
6372 	 *
6373 	 * We use kasan_check_byte(), which is supported for the hardware
6374 	 * tag-based KASAN mode, unlike kasan_check_read/write().
6375 	 *
6376 	 * If the pointed to memory is invalid, we return 0 to avoid users of
6377 	 * ksize() writing to and potentially corrupting the memory region.
6378 	 *
6379 	 * We want to perform the check before __ksize(), to avoid potentially
6380 	 * crashing in __ksize() due to accessing invalid metadata.
6381 	 */
6382 	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
6383 		return 0;
6384 
6385 	return kfence_ksize(objp) ?: __ksize(objp);
6386 }
6387 EXPORT_SYMBOL(ksize);
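
/*
 * Illustrative sketch (not from this file) of the distinction documented
 * above: size a buffer up front with kmalloc_size_roundup() instead of
 * querying ksize() after the fact.
 *
 *	size_t want = 100;
 *	size_t usable = kmalloc_size_roundup(want);
 *	char *buf = kmalloc(usable, GFP_KERNEL);
 *
 * All 'usable' bytes may then be used without upsetting KASAN, UBSAN_BOUNDS
 * or FORTIFY_SOURCE, whereas writing past the originally requested 'want'
 * bytes based on a later ksize() result may not be.
 */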
6388 
6389 static void free_large_kmalloc(struct page *page, void *object)
6390 {
6391 	unsigned int order = compound_order(page);
6392 
6393 	if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
6394 		dump_page(page, "Not a kmalloc allocation");
6395 		return;
6396 	}
6397 
6398 	if (WARN_ON_ONCE(order == 0))
6399 		pr_warn_once("object pointer: 0x%p\n", object);
6400 
6401 	kmemleak_free(object);
6402 	kasan_kfree_large(object);
6403 	kmsan_kfree_large(object);
6404 
6405 	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
6406 			      -(PAGE_SIZE << order));
6407 	__ClearPageLargeKmalloc(page);
6408 	free_frozen_pages(page, order);
6409 }
6410 
6411 /*
6412  * Given an rcu_head embedded within an object obtained from kvmalloc at an
6413  * offset < 4k, free the object in question.
6414  */
6415 void kvfree_rcu_cb(struct rcu_head *head)
6416 {
6417 	void *obj = head;
6418 	struct page *page;
6419 	struct slab *slab;
6420 	struct kmem_cache *s;
6421 	void *slab_addr;
6422 
6423 	if (is_vmalloc_addr(obj)) {
6424 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6425 		vfree(obj);
6426 		return;
6427 	}
6428 
6429 	page = virt_to_page(obj);
6430 	slab = page_slab(page);
6431 	if (!slab) {
6432 		/*
6433 		 * The rcu_head offset can only be less than the page size, so
6434 		 * there is no need to consider the allocation order.
6435 		 */
6436 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6437 		free_large_kmalloc(page, obj);
6438 		return;
6439 	}
6440 
6441 	s = slab->slab_cache;
6442 	slab_addr = slab_address(slab);
6443 
6444 	if (is_kfence_address(obj)) {
6445 		obj = kfence_object_start(obj);
6446 	} else {
6447 		unsigned int idx = __obj_to_index(s, slab_addr, obj);
6448 
6449 		obj = slab_addr + s->size * idx;
6450 		obj = fixup_red_left(s, obj);
6451 	}
6452 
6453 	slab_free(s, slab, obj, _RET_IP_);
6454 }
6455 
6456 /**
6457  * kfree - free previously allocated memory
6458  * @object: pointer returned by kmalloc(), kmalloc_nolock(), or kmem_cache_alloc()
6459  *
6460  * If @object is NULL, no operation is performed.
6461  */
6462 void kfree(const void *object)
6463 {
6464 	struct page *page;
6465 	struct slab *slab;
6466 	struct kmem_cache *s;
6467 	void *x = (void *)object;
6468 
6469 	trace_kfree(_RET_IP_, object);
6470 
6471 	if (unlikely(ZERO_OR_NULL_PTR(object)))
6472 		return;
6473 
6474 	page = virt_to_page(object);
6475 	slab = page_slab(page);
6476 	if (!slab) {
6477 		/* kmalloc_nolock() doesn't support large kmalloc */
6478 		free_large_kmalloc(page, (void *)object);
6479 		return;
6480 	}
6481 
6482 	s = slab->slab_cache;
6483 	slab_free(s, slab, x, _RET_IP_);
6484 }
6485 EXPORT_SYMBOL(kfree);
6486 
6487 /*
6488  * Can be called while holding raw_spinlock_t or from IRQ and NMI,
6489  * but ONLY for objects allocated by kmalloc_nolock().
6490  * Debug checks (like kmemleak and kfence) were skipped on allocation,
6491  * hence
6492  * obj = kmalloc(); kfree_nolock(obj);
6493  * will miss kmemleak/kfence bookkeeping and will cause false positives.
6494  * large_kmalloc is not supported either.
6495  */
6496 void kfree_nolock(const void *object)
6497 {
6498 	struct slab *slab;
6499 	struct kmem_cache *s;
6500 	void *x = (void *)object;
6501 
6502 	if (unlikely(ZERO_OR_NULL_PTR(object)))
6503 		return;
6504 
6505 	slab = virt_to_slab(object);
6506 	if (unlikely(!slab)) {
6507 		WARN_ONCE(1, "large_kmalloc is not supported by kfree_nolock()");
6508 		return;
6509 	}
6510 
6511 	s = slab->slab_cache;
6512 
6513 	memcg_slab_free_hook(s, slab, &x, 1);
6514 	alloc_tagging_slab_free_hook(s, slab, &x, 1);
6515 	/*
6516 	 * Unlike slab_free() do NOT call the following:
6517 	 * kmemleak_free_recursive(x, s->flags);
6518 	 * debug_check_no_locks_freed(x, s->object_size);
6519 	 * debug_check_no_obj_freed(x, s->object_size);
6520 	 * __kcsan_check_access(x, s->object_size, ..);
6521 	 * kfence_free(x);
6522 	 * since they either take spinlocks or are not safe to call from any context.
6523 	 */
6524 	kmsan_slab_free(s, x);
6525 	/*
6526 	 * If KASAN finds a kernel bug it will do kasan_report_invalid_free()
6527 	 * which will call raw_spin_lock_irqsave() which is technically
6528 	 * unsafe from NMI, but take chance and report kernel bug.
6529 	 * The sequence of
6530 	 * kasan_report_invalid_free() -> raw_spin_lock_irqsave() -> NMI
6531 	 *  -> kfree_nolock() -> kasan_report_invalid_free() on the same CPU
6532 	 * is double buggy and deserves to deadlock.
6533 	 */
6534 	if (kasan_slab_pre_free(s, x))
6535 		return;
6536 	/*
6537 	 * memcg, kasan_slab_pre_free are done for 'x'.
6538 	 * The only thing left is kasan_poison without quarantine,
6539 	 * since kasan quarantine takes locks and not supported from NMI.
6540 	 */
6541 	kasan_slab_free(s, x, false, false, /* skip quarantine */true);
6542 
6543 	if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())) {
6544 		if (likely(free_to_pcs(s, x, false)))
6545 			return;
6546 	}
6547 
6548 	/*
6549 	 * __slab_free() can locklessly cmpxchg16 into a slab, but then it might
6550 	 * need to take spin_lock for further processing.
6551 	 * Avoid the complexity and simply add to a deferred list.
6552 	 */
6553 	defer_free(s, x);
6554 }
6555 EXPORT_SYMBOL_GPL(kfree_nolock);
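
/*
 * Illustrative sketch (not from this file): kfree_nolock() must only be
 * paired with objects obtained from kmalloc_nolock(), e.g. from a context
 * that may hold a raw_spinlock_t. The exact kmalloc_nolock() parameters are
 * defined with its implementation and are elided here.
 *
 *	void *obj = kmalloc_nolock(...);
 *
 *	if (obj)
 *		kfree_nolock(obj);
 *
 * Mixing the pairs, e.g. kmalloc() followed by kfree_nolock(), skips the
 * kmemleak/kfence bookkeeping as explained above and must be avoided.
 */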
6556 
6557 static __always_inline __realloc_size(2) void *
6558 __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags, int nid)
6559 {
6560 	void *ret;
6561 	size_t ks = 0;
6562 	int orig_size = 0;
6563 	struct kmem_cache *s = NULL;
6564 
6565 	if (unlikely(ZERO_OR_NULL_PTR(p)))
6566 		goto alloc_new;
6567 
6568 	/* Check for double-free. */
6569 	if (!kasan_check_byte(p))
6570 		return NULL;
6571 
6572 	/*
6573 	 * If reallocation is not necessary (e.g. the new size is less
6574 	 * than the currently allocated size), the current allocation will be
6575 	 * preserved unless __GFP_THISNODE is set. In the latter case a new
6576 	 * allocation on the requested node will be attempted.
6577 	 */
6578 	if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
6579 		     nid != page_to_nid(virt_to_page(p)))
6580 		goto alloc_new;
6581 
6582 	if (is_kfence_address(p)) {
6583 		ks = orig_size = kfence_ksize(p);
6584 	} else {
6585 		struct page *page = virt_to_page(p);
6586 		struct slab *slab = page_slab(page);
6587 
6588 		if (!slab) {
6589 			/* Big kmalloc object */
6590 			ks = page_size(page);
6591 			WARN_ON(ks <= KMALLOC_MAX_CACHE_SIZE);
6592 			WARN_ON(p != page_address(page));
6593 		} else {
6594 			s = slab->slab_cache;
6595 			orig_size = get_orig_size(s, (void *)p);
6596 			ks = s->object_size;
6597 		}
6598 	}
6599 
6600 	/* If the old object doesn't fit, allocate a bigger one */
6601 	if (new_size > ks)
6602 		goto alloc_new;
6603 
6604 	/* If the old object doesn't satisfy the new alignment, allocate a new one */
6605 	if (!IS_ALIGNED((unsigned long)p, align))
6606 		goto alloc_new;
6607 
6608 	/* Zero out spare memory. */
6609 	if (want_init_on_alloc(flags)) {
6610 		kasan_disable_current();
6611 		if (orig_size && orig_size < new_size)
6612 			memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size);
6613 		else
6614 			memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
6615 		kasan_enable_current();
6616 	}
6617 
6618 	/* Setup kmalloc redzone when needed */
6619 	if (s && slub_debug_orig_size(s)) {
6620 		set_orig_size(s, (void *)p, new_size);
6621 		if (s->flags & SLAB_RED_ZONE && new_size < ks)
6622 			memset_no_sanitize_memory(kasan_reset_tag(p) + new_size,
6623 						SLUB_RED_ACTIVE, ks - new_size);
6624 	}
6625 
6626 	p = kasan_krealloc(p, new_size, flags);
6627 	return (void *)p;
6628 
6629 alloc_new:
6630 	ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
6631 	if (ret && p) {
6632 		/* Disable KASAN checks as the object's redzone is accessed. */
6633 		kasan_disable_current();
6634 		memcpy(ret, kasan_reset_tag(p), orig_size ?: ks);
6635 		kasan_enable_current();
6636 	}
6637 
6638 	return ret;
6639 }
6640 
6641 /**
6642  * krealloc_node_align - reallocate memory. The contents will remain unchanged.
6643  * @p: object to reallocate memory for.
6644  * @new_size: how many bytes of memory are required.
6645  * @align: desired alignment.
6646  * @flags: the type of memory to allocate.
6647  * @nid: NUMA node or NUMA_NO_NODE
6648  *
6649  * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
6650  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
6651  *
6652  * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6653  * Documentation/core-api/memory-allocation.rst for more details.
6654  *
6655  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
6656  * initial memory allocation, every subsequent call to this API for the same
6657  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
6658  * __GFP_ZERO is not fully honored by this API.
6659  *
6660  * When slub_debug_orig_size() is off, krealloc() only knows about the bucket
6661  * size of an allocation (but not the exact size it was allocated with) and
6662  * hence implements the following semantics for shrinking and growing buffers
6663  * with __GFP_ZERO::
6664  *
6665  *           new             bucket
6666  *   0       size             size
6667  *   |--------|----------------|
6668  *   |  keep  |      zero      |
6669  *
6670  * Otherwise, the original allocation size 'orig_size' could be used to
6671  * precisely clear the requested size, and the new size will also be stored
6672  * as the new 'orig_size'.
6673  *
6674  * In any case, the contents of the object pointed to are preserved up to the
6675  * lesser of the new and old sizes.
6676  *
6677  * Return: pointer to the allocated memory or %NULL in case of error
6678  */
6679 void *krealloc_node_align_noprof(const void *p, size_t new_size, unsigned long align,
6680 				 gfp_t flags, int nid)
6681 {
6682 	void *ret;
6683 
6684 	if (unlikely(!new_size)) {
6685 		kfree(p);
6686 		return ZERO_SIZE_PTR;
6687 	}
6688 
6689 	ret = __do_krealloc(p, new_size, align, flags, nid);
6690 	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
6691 		kfree(p);
6692 
6693 	return ret;
6694 }
6695 EXPORT_SYMBOL(krealloc_node_align_noprof);
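
/*
 * Illustrative sketch (not from this file) of the __GFP_ZERO rule documented
 * above: if the initial allocation was zeroed, every later krealloc() of the
 * same buffer should also pass __GFP_ZERO so that newly exposed bytes are
 * guaranteed to be cleared.
 *
 *	char *buf = kzalloc(64, GFP_KERNEL);
 *	...
 *	buf = krealloc(buf, 256, GFP_KERNEL | __GFP_ZERO);
 *
 * Dropping __GFP_ZERO on the second call may leave part of the grown region
 * uninitialized, as described in the kernel-doc above.
 */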
6696 
6697 static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
6698 {
6699 	/*
6700 	 * We want to attempt a large physically contiguous block first because
6701 	 * it is less likely to fragment multiple larger blocks and therefore
6702 	 * contributes less to long term fragmentation than the vmalloc fallback.
6703 	 * However, make sure that larger requests are not too disruptive - i.e.
6704 	 * do not direct reclaim unless physically contiguous memory is preferred
6705 	 * (__GFP_RETRY_MAYFAIL mode). We still kick in kswapd/kcompactd to
6706 	 * start working in the background.
6707 	 */
6708 	if (size > PAGE_SIZE) {
6709 		flags |= __GFP_NOWARN;
6710 
6711 		if (!(flags & __GFP_RETRY_MAYFAIL))
6712 			flags &= ~__GFP_DIRECT_RECLAIM;
6713 
6714 		/* nofail semantic is implemented by the vmalloc fallback */
6715 		flags &= ~__GFP_NOFAIL;
6716 	}
6717 
6718 	return flags;
6719 }
6720 
6721 /**
6722  * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
6723  * failure, fall back to non-contiguous (vmalloc) allocation.
6724  * @size: size of the request.
6725  * @b: which set of kmalloc buckets to allocate from.
6726  * @align: desired alignment.
6727  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
6728  * @node: numa node to allocate from
6729  *
6730  * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6731  * Documentation/core-api/memory-allocation.rst for more details.
6732  *
6733  * Uses kmalloc to get the memory but if the allocation fails then falls back
6734  * to the vmalloc allocator. Use kvfree for freeing the memory.
6735  *
6736  * GFP_NOWAIT and GFP_ATOMIC are supported, the __GFP_NORETRY modifier is not.
6737  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
6738  * preferable to the vmalloc fallback, due to visible performance drawbacks.
6739  *
6740  * Return: pointer to the allocated memory or %NULL in case of failure
6741  */
6742 void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
6743 			     gfp_t flags, int node)
6744 {
6745 	bool allow_block;
6746 	void *ret;
6747 
6748 	/*
6749 	 * It doesn't really make sense to fall back to vmalloc for sub-page
6750 	 * requests.
6751 	 */
6752 	ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),
6753 				kmalloc_gfp_adjust(flags, size),
6754 				node, _RET_IP_);
6755 	if (ret || size <= PAGE_SIZE)
6756 		return ret;
6757 
6758 	/* Don't even allow crazy sizes */
6759 	if (unlikely(size > INT_MAX)) {
6760 		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
6761 		return NULL;
6762 	}
6763 
6764 	/*
6765 	 * For non-blocking requests, VM_ALLOW_HUGE_VMAP is not used
6766 	 * because the huge-mapping path in vmalloc contains at
6767 	 * least one might_sleep() call.
6768 	 *
6769 	 * TODO: Revise huge-mapping path to support non-blocking
6770 	 * flags.
6771 	 */
6772 	allow_block = gfpflags_allow_blocking(flags);
6773 
6774 	/*
6775 	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
6776 	 * since the callers already cannot assume anything
6777 	 * about the resulting pointer, and cannot play
6778 	 * protection games.
6779 	 */
6780 	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
6781 			flags, PAGE_KERNEL, allow_block ? VM_ALLOW_HUGE_VMAP:0,
6782 			node, __builtin_return_address(0));
6783 }
6784 EXPORT_SYMBOL(__kvmalloc_node_noprof);
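
/*
 * Illustrative sketch (not from this file): the common wrappers around this
 * function try kmalloc first and transparently fall back to vmalloc, so the
 * caller must free with kvfree() since it cannot know which path succeeded.
 * 'struct entry' and 'nr' are placeholders.
 *
 *	struct entry *table = kvmalloc_array(nr, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */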
6785 
6786 /**
6787  * kvfree() - Free memory.
6788  * @addr: Pointer to allocated memory.
6789  *
6790  * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
6791  * It is slightly more efficient to use kfree() or vfree() if you are certain
6792  * that you know which one to use.
6793  *
6794  * Context: Either preemptible task context or not-NMI interrupt.
6795  */
6796 void kvfree(const void *addr)
6797 {
6798 	if (is_vmalloc_addr(addr))
6799 		vfree(addr);
6800 	else
6801 		kfree(addr);
6802 }
6803 EXPORT_SYMBOL(kvfree);
6804 
6805 /**
6806  * kvfree_sensitive - Free a data object containing sensitive information.
6807  * @addr: address of the data object to be freed.
6808  * @len: length of the data object.
6809  *
6810  * Use the special memzero_explicit() function to clear the content of a
6811  * kvmalloc'ed object containing sensitive data to make sure that the
6812  * compiler won't optimize out the data clearing.
6813  */
6814 void kvfree_sensitive(const void *addr, size_t len)
6815 {
6816 	if (likely(!ZERO_OR_NULL_PTR(addr))) {
6817 		memzero_explicit((void *)addr, len);
6818 		kvfree(addr);
6819 	}
6820 }
6821 EXPORT_SYMBOL(kvfree_sensitive);
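
/*
 * Illustrative sketch (not from this file): kvfree_sensitive() is meant for
 * buffers holding secrets such as key material, so the contents are wiped
 * with memzero_explicit() before the memory is returned. 'key_len' is a
 * placeholder.
 *
 *	void *key = kvmalloc(key_len, GFP_KERNEL);
 *	if (!key)
 *		return -ENOMEM;
 *	...
 *	kvfree_sensitive(key, key_len);
 */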
6822 
6823 /**
6824  * kvrealloc_node_align - reallocate memory; contents remain unchanged
6825  * @p: object to reallocate memory for
6826  * @size: the size to reallocate
6827  * @align: desired alignment
6828  * @flags: the flags for the page level allocator
6829  * @nid: NUMA node id
6830  *
6831  * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
6832  * and @p is not a %NULL pointer, the object pointed to is freed.
6833  *
6834  * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6835  * Documentation/core-api/memory-allocation.rst for more details.
6836  *
6837  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
6838  * initial memory allocation, every subsequent call to this API for the same
6839  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
6840  * __GFP_ZERO is not fully honored by this API.
6841  *
6842  * In any case, the contents of the object pointed to are preserved up to the
6843  * lesser of the new and old sizes.
6844  *
6845  * This function must not be called concurrently with itself or kvfree() for the
6846  * same memory allocation.
6847  *
6848  * Return: pointer to the allocated memory or %NULL in case of error
6849  */
6850 void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
6851 				  gfp_t flags, int nid)
6852 {
6853 	void *n;
6854 
6855 	if (is_vmalloc_addr(p))
6856 		return vrealloc_node_align_noprof(p, size, align, flags, nid);
6857 
6858 	n = krealloc_node_align_noprof(p, size, align, kmalloc_gfp_adjust(flags, size), nid);
6859 	if (!n) {
6860 		/* We failed to krealloc(), fall back to kvmalloc(). */
6861 		n = kvmalloc_node_align_noprof(size, align, flags, nid);
6862 		if (!n)
6863 			return NULL;
6864 
6865 		if (p) {
6866 			/* We already know that `p` is not a vmalloc address. */
6867 			kasan_disable_current();
6868 			memcpy(n, kasan_reset_tag(p), ksize(p));
6869 			kasan_enable_current();
6870 
6871 			kfree(p);
6872 		}
6873 	}
6874 
6875 	return n;
6876 }
6877 EXPORT_SYMBOL(kvrealloc_node_align_noprof);
6878 
6879 struct detached_freelist {
6880 	struct slab *slab;
6881 	void *tail;
6882 	void *freelist;
6883 	int cnt;
6884 	struct kmem_cache *s;
6885 };
6886 
6887 /*
6888  * This function progressively scans the array of objects to be freed (with
6889  * a limited look ahead) and extracts objects belonging to the same
6890  * slab.  It builds a detached freelist directly within the given
6891  * slab/objects.  This can happen without any need for
6892  * synchronization, because the objects are owned by the running process.
6893  * The freelist is built up as a singly linked list in the objects.
6894  * The idea is that this detached freelist can then be bulk
6895  * transferred to the real freelist(s), while requiring only a single
6896  * synchronization primitive.  Look ahead in the array is limited for
6897  * performance reasons. A worked example follows the function below.
6898  */
6899 static inline
6900 int build_detached_freelist(struct kmem_cache *s, size_t size,
6901 			    void **p, struct detached_freelist *df)
6902 {
6903 	int lookahead = 3;
6904 	void *object;
6905 	struct page *page;
6906 	struct slab *slab;
6907 	size_t same;
6908 
6909 	object = p[--size];
6910 	page = virt_to_page(object);
6911 	slab = page_slab(page);
6912 	if (!s) {
6913 		/* Handle kmalloc'ed objects */
6914 		if (!slab) {
6915 			free_large_kmalloc(page, object);
6916 			df->slab = NULL;
6917 			return size;
6918 		}
6919 		/* Derive kmem_cache from object */
6920 		df->slab = slab;
6921 		df->s = slab->slab_cache;
6922 	} else {
6923 		df->slab = slab;
6924 		df->s = s;
6925 	}
6926 
6927 	/* Start new detached freelist */
6928 	df->tail = object;
6929 	df->freelist = object;
6930 	df->cnt = 1;
6931 
6932 	if (is_kfence_address(object))
6933 		return size;
6934 
6935 	set_freepointer(df->s, object, NULL);
6936 
6937 	same = size;
6938 	while (size) {
6939 		object = p[--size];
6940 		/* df->slab is always set at this point */
6941 		if (df->slab == virt_to_slab(object)) {
6942 			/* Opportunistically build the freelist */
6943 			set_freepointer(df->s, object, df->freelist);
6944 			df->freelist = object;
6945 			df->cnt++;
6946 			same--;
6947 			if (size != same)
6948 				swap(p[size], p[same]);
6949 			continue;
6950 		}
6951 
6952 		/* Limit look ahead search */
6953 		if (!--lookahead)
6954 			break;
6955 	}
6956 
6957 	return same;
6958 }
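
/*
 * Worked example (illustrative object names): with p = {A1, B1, A2, A3},
 * where A1, A2 and A3 live in slab A and B1 in slab B, and size = 4, the
 * first call starts from A3 and scans backwards. A2 is linked behind A3,
 * B1 is skipped (consuming one step of lookahead), and A1 is linked and
 * swapped into p[1]. The call returns 1 with df describing the freelist
 * A1 -> A2 -> A3 for slab A, and the caller's next iteration processes the
 * remaining object B1.
 */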
6959 
6960 /*
6961  * Internal bulk free of objects that were not initialised by the post alloc
6962  * hooks and thus should not be processed by the free hooks
6963  */
6964 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
6965 {
6966 	if (!size)
6967 		return;
6968 
6969 	do {
6970 		struct detached_freelist df;
6971 
6972 		size = build_detached_freelist(s, size, p, &df);
6973 		if (!df.slab)
6974 			continue;
6975 
6976 		if (kfence_free(df.freelist))
6977 			continue;
6978 
6979 		__slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
6980 			     _RET_IP_);
6981 	} while (likely(size));
6982 }
6983 
6984 /* Note that interrupts must be enabled when calling this function. */
6985 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
6986 {
6987 	if (!size)
6988 		return;
6989 
6990 	/*
6991 	 * Freeing to sheaves is so incompatible with the detached freelist that
6992 	 * once we go that way, we have to do everything differently.
6993 	 */
6994 	if (s && cache_has_sheaves(s)) {
6995 		free_to_pcs_bulk(s, size, p);
6996 		return;
6997 	}
6998 
6999 	do {
7000 		struct detached_freelist df;
7001 
7002 		size = build_detached_freelist(s, size, p, &df);
7003 		if (!df.slab)
7004 			continue;
7005 
7006 		slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
7007 			       df.cnt, _RET_IP_);
7008 	} while (likely(size));
7009 }
7010 EXPORT_SYMBOL(kmem_cache_free_bulk);
7011 
7012 static unsigned int
7013 __refill_objects_node(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7014 		      unsigned int max, struct kmem_cache_node *n,
7015 		      bool allow_spin)
7016 {
7017 	struct partial_bulk_context pc;
7018 	struct slab *slab, *slab2;
7019 	unsigned int refilled = 0;
7020 	unsigned long flags;
7021 	void *object;
7022 
7023 	pc.flags = gfp;
7024 	pc.min_objects = min;
7025 	pc.max_objects = max;
7026 
7027 	if (!get_partial_node_bulk(s, n, &pc, allow_spin))
7028 		return 0;
7029 
7030 	list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7031 
7032 		list_del(&slab->slab_list);
7033 
7034 		object = get_freelist_nofreeze(s, slab);
7035 
7036 		while (object && refilled < max) {
7037 			p[refilled] = object;
7038 			object = get_freepointer(s, object);
7039 			maybe_wipe_obj_freeptr(s, p[refilled]);
7040 
7041 			refilled++;
7042 		}
7043 
7044 		/*
7045 		 * The freelist had more objects than we can accommodate, so we
7046 		 * need to free them back. We can treat it like a detached
7047 		 * freelist; we just need to find the tail object.
7048 		 */
7049 		if (unlikely(object)) {
7050 			void *head = object;
7051 			void *tail;
7052 			int cnt = 0;
7053 
7054 			do {
7055 				tail = object;
7056 				cnt++;
7057 				object = get_freepointer(s, object);
7058 			} while (object);
7059 			__slab_free(s, slab, head, tail, cnt, _RET_IP_);
7060 		}
7061 
7062 		if (refilled >= max)
7063 			break;
7064 	}
7065 
7066 	if (unlikely(!list_empty(&pc.slabs))) {
7067 		spin_lock_irqsave(&n->list_lock, flags);
7068 
7069 		list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7070 
7071 			if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial))
7072 				continue;
7073 
7074 			list_del(&slab->slab_list);
7075 			add_partial(n, slab, ADD_TO_HEAD);
7076 		}
7077 
7078 		spin_unlock_irqrestore(&n->list_lock, flags);
7079 
7080 		/* any slabs left are completely free and will be discarded */
7081 		list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7082 
7083 			list_del(&slab->slab_list);
7084 			discard_slab(s, slab);
7085 		}
7086 	}
7087 
7088 	return refilled;
7089 }
7090 
7091 #ifdef CONFIG_NUMA
7092 static unsigned int
7093 __refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7094 		     unsigned int max)
7095 {
7096 	struct zonelist *zonelist;
7097 	struct zoneref *z;
7098 	struct zone *zone;
7099 	enum zone_type highest_zoneidx = gfp_zone(gfp);
7100 	unsigned int cpuset_mems_cookie;
7101 	unsigned int refilled = 0;
7102 
7103 	/* see get_from_any_partial() for the defrag ratio description */
7104 	if (!s->remote_node_defrag_ratio ||
7105 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
7106 		return 0;
7107 
7108 	do {
7109 		cpuset_mems_cookie = read_mems_allowed_begin();
7110 		zonelist = node_zonelist(mempolicy_slab_node(), gfp);
7111 		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
7112 			struct kmem_cache_node *n;
7113 			unsigned int r;
7114 
7115 			n = get_node(s, zone_to_nid(zone));
7116 
7117 			if (!n || !cpuset_zone_allowed(zone, gfp) ||
7118 					n->nr_partial <= s->min_partial)
7119 				continue;
7120 
7121 			r = __refill_objects_node(s, p, gfp, min, max, n,
7122 						  /* allow_spin = */ false);
7123 			refilled += r;
7124 
7125 			if (r >= min) {
7126 				/*
7127 				 * Don't check read_mems_allowed_retry() here -
7128 				 * if mems_allowed was updated in parallel, that
7129 				 * was a harmless race between allocation and
7130 				 * the cpuset update
7131 				 */
7132 				return refilled;
7133 			}
7134 			p += r;
7135 			min -= r;
7136 			max -= r;
7137 		}
7138 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
7139 
7140 	return refilled;
7141 }
7142 #else
7143 static inline unsigned int
7144 __refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7145 		     unsigned int max)
7146 {
7147 	return 0;
7148 }
7149 #endif
7150 
7151 static unsigned int
7152 refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7153 	       unsigned int max)
7154 {
7155 	int local_node = numa_mem_id();
7156 	unsigned int refilled;
7157 	struct slab *slab;
7158 
7159 	if (WARN_ON_ONCE(!gfpflags_allow_spinning(gfp)))
7160 		return 0;
7161 
7162 	refilled = __refill_objects_node(s, p, gfp, min, max,
7163 					 get_node(s, local_node),
7164 					 /* allow_spin = */ true);
7165 	if (refilled >= min)
7166 		return refilled;
7167 
7168 	refilled += __refill_objects_any(s, p + refilled, gfp, min - refilled,
7169 					 max - refilled);
7170 	if (refilled >= min)
7171 		return refilled;
7172 
7173 new_slab:
7174 
7175 	slab = new_slab(s, gfp, local_node);
7176 	if (!slab)
7177 		goto out;
7178 
7179 	stat(s, ALLOC_SLAB);
7180 
7181 	/*
7182 	 * TODO: possible optimization - if we know we will consume the whole
7183 	 * slab we might skip creating the freelist?
7184 	 */
7185 	refilled += alloc_from_new_slab(s, slab, p + refilled, max - refilled,
7186 					/* allow_spin = */ true);
7187 
7188 	if (refilled < min)
7189 		goto new_slab;
7190 
7191 out:
7192 	return refilled;
7193 }
7194 
7195 static inline
7196 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
7197 			    void **p)
7198 {
7199 	int i;
7200 
7201 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
7202 		for (i = 0; i < size; i++) {
7203 
7204 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_,
7205 					     s->object_size);
7206 			if (unlikely(!p[i]))
7207 				goto error;
7208 
7209 			maybe_wipe_obj_freeptr(s, p[i]);
7210 		}
7211 	} else {
7212 		i = refill_objects(s, p, flags, size, size);
7213 		if (i < size)
7214 			goto error;
7215 		stat_add(s, ALLOC_SLOWPATH, i);
7216 	}
7217 
7218 	return i;
7219 
7220 error:
7221 	__kmem_cache_free_bulk(s, i, p);
7222 	return 0;
7223 
7224 }
7225 
7226 /*
7227  * Note that interrupts must be enabled when calling this function and gfp
7228  * flags must allow spinning.
7229  */
7230 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
7231 				 void **p)
7232 {
7233 	unsigned int i = 0;
7234 	void *kfence_obj;
7235 
7236 	if (!size)
7237 		return 0;
7238 
7239 	s = slab_pre_alloc_hook(s, flags);
7240 	if (unlikely(!s))
7241 		return 0;
7242 
7243 	/*
7244 	 * To make things simpler, assume at most one kfence-allocated
7245 	 * object per bulk allocation and choose its index randomly.
7246 	 */
7247 	kfence_obj = kfence_alloc(s, s->object_size, flags);
7248 
7249 	if (unlikely(kfence_obj)) {
7250 		if (unlikely(size == 1)) {
7251 			p[0] = kfence_obj;
7252 			goto out;
7253 		}
7254 		size--;
7255 	}
7256 
7257 	i = alloc_from_pcs_bulk(s, flags, size, p);
7258 
7259 	if (i < size) {
7260 		/*
7261 		 * If we ran out of memory, don't bother with freeing back to
7262 		 * the percpu sheaves, we have bigger problems.
7263 		 */
7264 		if (unlikely(__kmem_cache_alloc_bulk(s, flags, size - i, p + i) == 0)) {
7265 			if (i > 0)
7266 				__kmem_cache_free_bulk(s, i, p);
7267 			if (kfence_obj)
7268 				__kfence_free(kfence_obj);
7269 			return 0;
7270 		}
7271 	}
7272 
7273 	if (unlikely(kfence_obj)) {
7274 		int idx = get_random_u32_below(size + 1);
7275 
7276 		if (idx != size)
7277 			p[size] = p[idx];
7278 		p[idx] = kfence_obj;
7279 
7280 		size++;
7281 	}
7282 
7283 out:
7284 	/*
7285 	 * memcg and kmem_cache debug support and memory initialization.
7286 	 * Done outside of the IRQ disabled fastpath loop.
7287 	 */
7288 	if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
7289 		    slab_want_init_on_alloc(flags, s), s->object_size))) {
7290 		return 0;
7291 	}
7292 
7293 	return size;
7294 }
7295 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
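
/*
 * Illustrative sketch (not from this file): bulk allocation is all-or-nothing
 * from the caller's perspective - it returns either the full requested count
 * or 0 - and pairs naturally with kmem_cache_free_bulk(). 'my_cache' is a
 * placeholder.
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
 */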
7296 
7297 /*
7298  * Object placement in a slab is made very easy because we always start at
7299  * offset 0. If we tune the size of the object to the alignment then we can
7300  * get the required alignment by putting one properly sized object after
7301  * another.
7302  *
7303  * Notice that the allocation order determines the sizes of the per cpu
7304  * caches. Each processor has always one slab available for allocations.
7305  * Increasing the allocation order reduces the number of times that slabs
7306  * must be moved on and off the partial lists and is therefore a factor in
7307  * locking overhead.
7308  */
7309 
7310 /*
7311  * Minimum / Maximum order of slab pages. This influences locking overhead
7312  * and slab fragmentation. A higher order reduces the number of partial slabs
7313  * and increases the number of allocations possible without having to
7314  * take the list_lock.
7315  */
7316 static unsigned int slub_min_order;
7317 static unsigned int slub_max_order =
7318 	IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
7319 static unsigned int slub_min_objects;
7320 
7321 /*
7322  * Calculate the order of allocation given a slab object size.
7323  *
7324  * The order of allocation has significant impact on performance and other
7325  * system components. Generally order 0 allocations should be preferred since
7326  * order 0 does not cause fragmentation in the page allocator. Larger objects
7327  * can be problematic to put into order 0 slabs because there may be too much
7328  * unused space left. We go to a higher order if more than 1/16th of the slab
7329  * would be wasted.
7330  *
7331  * In order to reach satisfactory performance we must ensure that a minimum
7332  * number of objects is in one slab. Otherwise we may generate too much
7333  * activity on the partial lists which requires taking the list_lock. This is
7334  * less a concern for large slabs though which are rarely used.
7335  *
7336  * slab_max_order specifies the order where we begin to stop considering the
7337  * number of objects in a slab as critical. If we reach slab_max_order then
7338  * we try to keep the page order as low as possible. So we accept more waste
7339  * of space in favor of a small page order.
7340  *
7341  * Higher order allocations also allow the placement of more objects in a
7342  * slab and thereby reduce object handling overhead. If the user has
7343  * requested a higher minimum order then we start with that one instead of
7344  * the smallest order which will fit the object.
7345  */
7346 static inline unsigned int calc_slab_order(unsigned int size,
7347 		unsigned int min_order, unsigned int max_order,
7348 		unsigned int fract_leftover)
7349 {
7350 	unsigned int order;
7351 
7352 	for (order = min_order; order <= max_order; order++) {
7353 
7354 		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
7355 		unsigned int rem;
7356 
7357 		rem = slab_size % size;
7358 
7359 		if (rem <= slab_size / fract_leftover)
7360 			break;
7361 	}
7362 
7363 	return order;
7364 }
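
/*
 * Worked example (illustrative numbers): with a hypothetical object size of
 * 700 bytes, 4K pages, min_order = 0, max_order = 3 and fract_leftover = 16,
 * order 0 wastes 4096 % 700 = 596 bytes, which is more than 4096 / 16 = 256,
 * so it is rejected; order 1 wastes 8192 % 700 = 492 bytes, which is within
 * 8192 / 16 = 512, so order 1 is returned.
 */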
7365 
7366 static inline int calculate_order(unsigned int size)
7367 {
7368 	unsigned int order;
7369 	unsigned int min_objects;
7370 	unsigned int max_objects;
7371 	unsigned int min_order;
7372 
7373 	min_objects = slub_min_objects;
7374 	if (!min_objects) {
7375 		/*
7376 		 * Some architectures will only update present cpus when
7377 		 * onlining them, so don't trust the number if it's just 1. But
7378 		 * we also don't want to use nr_cpu_ids always, as on some other
7379 		 * architectures, there can be many possible cpus, but never
7380 		 * onlined. Here we compromise between trying to avoid too high
7381 		 * order on systems that appear larger than they are, and too
7382 		 * low order on systems that appear smaller than they are.
7383 		 */
7384 		unsigned int nr_cpus = num_present_cpus();
7385 		if (nr_cpus <= 1)
7386 			nr_cpus = nr_cpu_ids;
7387 		min_objects = 4 * (fls(nr_cpus) + 1);
7388 	}
7389 	/* min_objects can't be 0 because get_order(0) is undefined */
7390 	max_objects = max(order_objects(slub_max_order, size), 1U);
7391 	min_objects = min(min_objects, max_objects);
7392 
7393 	min_order = max_t(unsigned int, slub_min_order,
7394 			  get_order(min_objects * size));
7395 	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
7396 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
7397 
7398 	/*
7399 	 * Attempt to find best configuration for a slab. This works by first
7400 	 * attempting to generate a layout with the best possible configuration
7401 	 * and backing off gradually.
7402 	 *
7403 	 * We start with accepting at most 1/16 waste and try to find the
7404 	 * smallest order from min_objects-derived/slab_min_order up to
7405 	 * slab_max_order that will satisfy the constraint. Note that increasing
7406 	 * the order can only result in same or less fractional waste, not more.
7407 	 *
7408 	 * If that fails, we increase the acceptable fraction of waste and try
7409 	 * again. The last iteration with fraction of 1/2 would effectively
7410 	 * accept any waste and give us the order determined by min_objects, as
7411 	 * long as at least a single object fits within slab_max_order.
7412 	 */
7413 	for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
7414 		order = calc_slab_order(size, min_order, slub_max_order,
7415 					fraction);
7416 		if (order <= slub_max_order)
7417 			return order;
7418 	}
7419 
7420 	/*
7421 	 * Doh this slab cannot be placed using slab_max_order.
7422 	 */
7423 	order = get_order(size);
7424 	if (order <= MAX_PAGE_ORDER)
7425 		return order;
7426 	return -ENOSYS;
7427 }
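/*
 * Worked example (hypothetical configuration: 4K pages, default
 * slab_min_order/slab_max_order): with 16 present CPUs,
 * min_objects = 4 * (fls(16) + 1) = 24. For a 256-byte object,
 * get_order(24 * 256) = 1, so min_order = 1, and the first pass with 1/16
 * acceptable waste already succeeds there since 8192 % 256 == 0,
 * giving order 1.
 */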
7428 
7429 static void
7430 init_kmem_cache_node(struct kmem_cache_node *n, struct node_barn *barn)
7431 {
7432 	n->nr_partial = 0;
7433 	spin_lock_init(&n->list_lock);
7434 	INIT_LIST_HEAD(&n->partial);
7435 #ifdef CONFIG_SLUB_DEBUG
7436 	atomic_long_set(&n->nr_slabs, 0);
7437 	atomic_long_set(&n->total_objects, 0);
7438 	INIT_LIST_HEAD(&n->full);
7439 #endif
7440 	n->barn = barn;
7441 	if (barn)
7442 		barn_init(barn);
7443 }
7444 
7445 #ifdef CONFIG_SLUB_STATS
7446 static inline int alloc_kmem_cache_stats(struct kmem_cache *s)
7447 {
7448 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
7449 			NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
7450 			sizeof(struct kmem_cache_stats));
7451 
7452 	s->cpu_stats = alloc_percpu(struct kmem_cache_stats);
7453 
7454 	if (!s->cpu_stats)
7455 		return 0;
7456 
7457 	return 1;
7458 }
7459 #endif
7460 
7461 static int init_percpu_sheaves(struct kmem_cache *s)
7462 {
7463 	static struct slab_sheaf bootstrap_sheaf = {};
7464 	int cpu;
7465 
7466 	for_each_possible_cpu(cpu) {
7467 		struct slub_percpu_sheaves *pcs;
7468 
7469 		pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
7470 
7471 		local_trylock_init(&pcs->lock);
7472 
7473 		/*
7474 		 * Bootstrap sheaf has zero size so fast-path allocation fails.
7475 		 * It also has size == s->sheaf_capacity, so fast-path free
7476 		 * fails. In the slow paths we recognize the situation by
7477 		 * checking s->sheaf_capacity. This allows fast paths to assume
7478 		 * s->cpu_sheaves and pcs->main always exist and are valid.
7479 		 * It's also safe to share the single static bootstrap_sheaf
7480 		 * with zero-sized objects array as it's never modified.
7481 		 *
7482 		 * Bootstrap_sheaf also has a NULL pointer to kmem_cache so we
7483 		 * recognize it and not attempt to free it when destroying the
7484 		 * cache.
7485 		 *
7486 		 * We keep bootstrap_sheaf for kmem_cache and kmem_cache_node,
7487 		 * caches with debug enabled, and all caches with SLUB_TINY.
7488 		 * For kmalloc caches it's used temporarily during the initial
7489 		 * bootstrap.
7490 		 */
7491 		if (!s->sheaf_capacity)
7492 			pcs->main = &bootstrap_sheaf;
7493 		else
7494 			pcs->main = alloc_empty_sheaf(s, GFP_KERNEL);
7495 
7496 		if (!pcs->main)
7497 			return -ENOMEM;
7498 	}
7499 
7500 	return 0;
7501 }
7502 
7503 static struct kmem_cache *kmem_cache_node;
7504 
7505 /*
7506  * No kmalloc_node yet so do it by hand. We know that this is the first
7507  * slab on the node for this slabcache. There are no concurrent accesses
7508  * possible.
7509  *
7510  * Note that this function only works on the kmem_cache_node
7511  * when allocating for the kmem_cache_node. This is used for bootstrapping
7512  * memory on a fresh node that has no slab structures yet.
7513  */
7514 static void early_kmem_cache_node_alloc(int node)
7515 {
7516 	struct slab *slab;
7517 	struct kmem_cache_node *n;
7518 
7519 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
7520 
7521 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
7522 
7523 	BUG_ON(!slab);
7524 	if (slab_nid(slab) != node) {
7525 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
7526 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
7527 	}
7528 
7529 	n = slab->freelist;
7530 	BUG_ON(!n);
7531 #ifdef CONFIG_SLUB_DEBUG
7532 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
7533 #endif
7534 	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
7535 	slab->freelist = get_freepointer(kmem_cache_node, n);
7536 	slab->inuse = 1;
7537 	kmem_cache_node->node[node] = n;
7538 	init_kmem_cache_node(n, NULL);
7539 	inc_slabs_node(kmem_cache_node, node, slab->objects);
7540 
7541 	/*
7542 	 * No locks need to be taken here as it has just been
7543 	 * initialized and there is no concurrent access.
7544 	 */
7545 	__add_partial(n, slab, ADD_TO_HEAD);
7546 }
7547 
7548 static void free_kmem_cache_nodes(struct kmem_cache *s)
7549 {
7550 	int node;
7551 	struct kmem_cache_node *n;
7552 
7553 	for_each_kmem_cache_node(s, node, n) {
7554 		if (n->barn) {
7555 			WARN_ON(n->barn->nr_full);
7556 			WARN_ON(n->barn->nr_empty);
7557 			kfree(n->barn);
7558 			n->barn = NULL;
7559 		}
7560 
7561 		s->node[node] = NULL;
7562 		kmem_cache_free(kmem_cache_node, n);
7563 	}
7564 }
7565 
7566 void __kmem_cache_release(struct kmem_cache *s)
7567 {
7568 	cache_random_seq_destroy(s);
7569 	pcs_destroy(s);
7570 #ifdef CONFIG_SLUB_STATS
7571 	free_percpu(s->cpu_stats);
7572 #endif
7573 	free_kmem_cache_nodes(s);
7574 }
7575 
7576 static int init_kmem_cache_nodes(struct kmem_cache *s)
7577 {
7578 	int node;
7579 
7580 	for_each_node_mask(node, slab_nodes) {
7581 		struct kmem_cache_node *n;
7582 		struct node_barn *barn = NULL;
7583 
7584 		if (slab_state == DOWN) {
7585 			early_kmem_cache_node_alloc(node);
7586 			continue;
7587 		}
7588 
7589 		if (cache_has_sheaves(s)) {
7590 			barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
7591 
7592 			if (!barn)
7593 				return 0;
7594 		}
7595 
7596 		n = kmem_cache_alloc_node(kmem_cache_node,
7597 						GFP_KERNEL, node);
7598 		if (!n) {
7599 			kfree(barn);
7600 			return 0;
7601 		}
7602 
7603 		init_kmem_cache_node(n, barn);
7604 
7605 		s->node[node] = n;
7606 	}
7607 	return 1;
7608 }
7609 
7610 static unsigned int calculate_sheaf_capacity(struct kmem_cache *s,
7611 					     struct kmem_cache_args *args)
7612 
7613 {
7614 	unsigned int capacity;
7615 	size_t size;
7616 
7617 
7618 	if (IS_ENABLED(CONFIG_SLUB_TINY) || s->flags & SLAB_DEBUG_FLAGS)
7619 		return 0;
7620 
7621 	/*
7622 	 * Bootstrap caches can't have sheaves for now (SLAB_NO_OBJ_EXT).
7623 	 * SLAB_NOLEAKTRACE caches (e.g., kmemleak's object_cache) must not
7624 	 * have sheaves to avoid recursion when sheaf allocation triggers
7625 	 * kmemleak tracking.
7626 	 */
7627 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
7628 		return 0;
7629 
7630 	/*
7631 	 * For now we use a roughly similar formula (divided by two as there are
7632 	 * two percpu sheaves) to what was used for percpu partial slabs, which
7633 	 * should result in similar lock contention (barn or list_lock).
7634 	 */
7635 	if (s->size >= PAGE_SIZE)
7636 		capacity = 4;
7637 	else if (s->size >= 1024)
7638 		capacity = 12;
7639 	else if (s->size >= 256)
7640 		capacity = 26;
7641 	else
7642 		capacity = 60;
7643 
7644 	/* Increment capacity to make sheaf exactly a kmalloc size bucket */
7645 	size = struct_size_t(struct slab_sheaf, objects, capacity);
7646 	size = kmalloc_size_roundup(size);
7647 	capacity = (size - struct_size_t(struct slab_sheaf, objects, 0)) / sizeof(void *);
7648 
7649 	/*
7650 	 * Respect an explicit request for capacity, typically motivated by the
7651 	 * expected maximum size of kmem_cache_prefill_sheaf() requests, so those
7652 	 * do not end up using low-performance oversize sheaves.
7653 	 */
7654 	return max(capacity, args->sheaf_capacity);
7655 }
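/*
 * Illustrative example (hypothetical sizes, 64-bit): an object with
 * s->size == 300 starts with capacity = 26. Assuming a 32-byte
 * struct slab_sheaf header, struct_size_t() gives 32 + 26 * 8 = 240,
 * kmalloc_size_roundup() rounds that up to 256, and the final capacity
 * becomes (256 - 32) / 8 = 28, so the sheaf exactly fills its kmalloc bucket.
 */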
7656 
7657 /*
7658  * calculate_sizes() determines the order and the distribution of data within
7659  * a slab object.
7660  */
7661 static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
7662 {
7663 	slab_flags_t flags = s->flags;
7664 	unsigned int size = s->object_size;
7665 	unsigned int aligned_size;
7666 	unsigned int order;
7667 
7668 	/*
7669 	 * Round up object size to the next word boundary. We can only
7670 	 * place the free pointer at word boundaries and this determines
7671 	 * the possible location of the free pointer.
7672 	 */
7673 	size = ALIGN(size, sizeof(void *));
7674 
7675 #ifdef CONFIG_SLUB_DEBUG
7676 	/*
7677 	 * Determine if we can poison the object itself. If the user of
7678 	 * the slab may touch the object after free or before allocation
7679 	 * then we should never poison the object itself.
7680 	 */
7681 	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
7682 			!s->ctor)
7683 		s->flags |= __OBJECT_POISON;
7684 	else
7685 		s->flags &= ~__OBJECT_POISON;
7686 
7687 
7688 	/*
7689 	 * If we are Redzoning and there is no space between the end of the
7690 	 * object and the following fields, add one word so the right Redzone
7691 	 * is non-empty.
7692 	 */
7693 	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
7694 		size += sizeof(void *);
7695 #endif
7696 
7697 	/*
7698 	 * With that we have determined the number of bytes in actual use
7699 	 * by the object and redzoning.
7700 	 */
7701 	s->inuse = size;
7702 
7703 	if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
7704 	    (flags & SLAB_POISON) ||
7705 	    (s->ctor && !args->use_freeptr_offset) ||
7706 	    ((flags & SLAB_RED_ZONE) &&
7707 	     (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
7708 		/*
7709 		 * Relocate free pointer after the object if it is not
7710 		 * permitted to overwrite the first word of the object on
7711 		 * kmem_cache_free.
7712 		 *
7713 		 * This is the case if we do RCU, have a constructor, are
7714 		 * poisoning the objects, are redzoning an object smaller
7715 		 * than sizeof(void *), or are redzoning an object with
7716 		 * slub_debug_orig_size() enabled, in which case the right
7717 		 * redzone may be extended.
7718 		 *
7719 		 * The assumption that s->offset >= s->inuse means free
7720 		 * pointer is outside of the object is used in the
7721 		 * freeptr_outside_object() function. If that is no
7722 		 * longer true, the function needs to be modified.
7723 		 */
7724 		s->offset = size;
7725 		size += sizeof(void *);
7726 	} else if (((flags & SLAB_TYPESAFE_BY_RCU) || s->ctor) &&
7727 			args->use_freeptr_offset) {
7728 		s->offset = args->freeptr_offset;
7729 	} else {
7730 		/*
7731 		 * Store freelist pointer near middle of object to keep
7732 		 * it away from the edges of the object to avoid small
7733 		 * sized over/underflows from neighboring allocations.
7734 		 */
7735 		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
7736 	}
7737 
7738 #ifdef CONFIG_SLUB_DEBUG
7739 	if (flags & SLAB_STORE_USER) {
7740 		/*
7741 		 * Need to store information about allocs and frees after
7742 		 * the object.
7743 		 */
7744 		size += 2 * sizeof(struct track);
7745 
7746 		/* Save the original kmalloc request size */
7747 		if (flags & SLAB_KMALLOC)
7748 			size += sizeof(unsigned long);
7749 	}
7750 #endif
7751 
7752 	kasan_cache_create(s, &size, &s->flags);
7753 #ifdef CONFIG_SLUB_DEBUG
7754 	if (flags & SLAB_RED_ZONE) {
7755 		/*
7756 		 * Add some empty padding so that we can catch
7757 		 * overwrites from earlier objects rather than let
7758 		 * tracking information or the free pointer be
7759 		 * corrupted if a user writes before the start
7760 		 * of the object.
7761 		 */
7762 		size += sizeof(void *);
7763 
7764 		s->red_left_pad = sizeof(void *);
7765 		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
7766 		size += s->red_left_pad;
7767 	}
7768 #endif
7769 
7770 	/*
7771 	 * SLUB stores one object immediately after another beginning from
7772 	 * offset 0. In order to align the objects we have to simply size
7773 	 * each object to conform to the alignment.
7774 	 */
7775 	aligned_size = ALIGN(size, s->align);
7776 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
7777 	if (slab_args_unmergeable(args, s->flags) &&
7778 			(aligned_size - size >= sizeof(struct slabobj_ext)))
7779 		s->flags |= SLAB_OBJ_EXT_IN_OBJ;
7780 #endif
7781 	size = aligned_size;
7782 
7783 	s->size = size;
7784 	s->reciprocal_size = reciprocal_value(size);
7785 	order = calculate_order(size);
7786 
7787 	if ((int)order < 0)
7788 		return 0;
7789 
7790 	s->allocflags = __GFP_COMP;
7791 
7792 	if (s->flags & SLAB_CACHE_DMA)
7793 		s->allocflags |= GFP_DMA;
7794 
7795 	if (s->flags & SLAB_CACHE_DMA32)
7796 		s->allocflags |= GFP_DMA32;
7797 
7798 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
7799 		s->allocflags |= __GFP_RECLAIMABLE;
7800 
7801 	/*
7802 	 * For KMALLOC_NORMAL caches we enable sheaves later, in
7803 	 * bootstrap_kmalloc_sheaves(), to avoid recursion.
7804 	 */
7805 	if (!is_kmalloc_normal(s))
7806 		s->sheaf_capacity = calculate_sheaf_capacity(s, args);
7807 
7808 	/*
7809 	 * Determine the number of objects per slab
7810 	 */
7811 	s->oo = oo_make(order, size);
7812 	s->min = oo_make(get_order(size), size);
7813 
7814 	return !!oo_objects(s->oo);
7815 }
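/*
 * Worked example (hypothetical cache, 64-bit, no debug or KASAN metadata):
 * for object_size = 24, align = 8, no ctor and no RCU, size stays 24 after
 * word alignment, s->inuse = 24, and the free pointer lives inside the
 * object at s->offset = ALIGN_DOWN(24 / 2, 8) = 8. With a ctor the free
 * pointer must not overlap the object, so s->offset would become 24 and
 * s->size would grow to 32.
 */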
7816 
7817 static void list_slab_objects(struct kmem_cache *s, struct slab *slab)
7818 {
7819 #ifdef CONFIG_SLUB_DEBUG
7820 	void *addr = slab_address(slab);
7821 	void *p;
7822 
7823 	if (!slab_add_kunit_errors())
7824 		slab_bug(s, "Objects remaining on __kmem_cache_shutdown()");
7825 
7826 	spin_lock(&object_map_lock);
7827 	__fill_map(object_map, s, slab);
7828 
7829 	for_each_object(p, s, addr, slab->objects) {
7830 
7831 		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
7832 			if (slab_add_kunit_errors())
7833 				continue;
7834 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
7835 			print_tracking(s, p);
7836 		}
7837 	}
7838 	spin_unlock(&object_map_lock);
7839 
7840 	__slab_err(slab);
7841 #endif
7842 }
7843 
7844 /*
7845  * Attempt to free all partial slabs on a node.
7846  * This is called from __kmem_cache_shutdown(). We must take list_lock
7847  * because sysfs files might still access the partial list after the shutdown.
7848  */
7849 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
7850 {
7851 	LIST_HEAD(discard);
7852 	struct slab *slab, *h;
7853 
7854 	BUG_ON(irqs_disabled());
7855 	spin_lock_irq(&n->list_lock);
7856 	list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
7857 		if (!slab->inuse) {
7858 			remove_partial(n, slab);
7859 			list_add(&slab->slab_list, &discard);
7860 		} else {
7861 			list_slab_objects(s, slab);
7862 		}
7863 	}
7864 	spin_unlock_irq(&n->list_lock);
7865 
7866 	list_for_each_entry_safe(slab, h, &discard, slab_list)
7867 		discard_slab(s, slab);
7868 }
7869 
7870 bool __kmem_cache_empty(struct kmem_cache *s)
7871 {
7872 	int node;
7873 	struct kmem_cache_node *n;
7874 
7875 	for_each_kmem_cache_node(s, node, n)
7876 		if (n->nr_partial || node_nr_slabs(n))
7877 			return false;
7878 	return true;
7879 }
7880 
7881 /*
7882  * Release all resources used by a slab cache.
7883  */
7884 int __kmem_cache_shutdown(struct kmem_cache *s)
7885 {
7886 	int node;
7887 	struct kmem_cache_node *n;
7888 
7889 	flush_all_cpus_locked(s);
7890 
7891 	/* we might have rcu sheaves in flight */
7892 	if (cache_has_sheaves(s))
7893 		rcu_barrier();
7894 
7895 	/* Attempt to free all objects */
7896 	for_each_kmem_cache_node(s, node, n) {
7897 		if (n->barn)
7898 			barn_shrink(s, n->barn);
7899 		free_partial(s, n);
7900 		if (n->nr_partial || node_nr_slabs(n))
7901 			return 1;
7902 	}
7903 	return 0;
7904 }
7905 
7906 #ifdef CONFIG_PRINTK
7907 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
7908 {
7909 	void *base;
7910 	int __maybe_unused i;
7911 	unsigned int objnr;
7912 	void *objp;
7913 	void *objp0;
7914 	struct kmem_cache *s = slab->slab_cache;
7915 	struct track __maybe_unused *trackp;
7916 
7917 	kpp->kp_ptr = object;
7918 	kpp->kp_slab = slab;
7919 	kpp->kp_slab_cache = s;
7920 	base = slab_address(slab);
7921 	objp0 = kasan_reset_tag(object);
7922 #ifdef CONFIG_SLUB_DEBUG
7923 	objp = restore_red_left(s, objp0);
7924 #else
7925 	objp = objp0;
7926 #endif
7927 	objnr = obj_to_index(s, slab, objp);
7928 	kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
7929 	objp = base + s->size * objnr;
7930 	kpp->kp_objp = objp;
7931 	if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
7932 			 || (objp - base) % s->size) ||
7933 	    !(s->flags & SLAB_STORE_USER))
7934 		return;
7935 #ifdef CONFIG_SLUB_DEBUG
7936 	objp = fixup_red_left(s, objp);
7937 	trackp = get_track(s, objp, TRACK_ALLOC);
7938 	kpp->kp_ret = (void *)trackp->addr;
7939 #ifdef CONFIG_STACKDEPOT
7940 	{
7941 		depot_stack_handle_t handle;
7942 		unsigned long *entries;
7943 		unsigned int nr_entries;
7944 
7945 		handle = READ_ONCE(trackp->handle);
7946 		if (handle) {
7947 			nr_entries = stack_depot_fetch(handle, &entries);
7948 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
7949 				kpp->kp_stack[i] = (void *)entries[i];
7950 		}
7951 
7952 		trackp = get_track(s, objp, TRACK_FREE);
7953 		handle = READ_ONCE(trackp->handle);
7954 		if (handle) {
7955 			nr_entries = stack_depot_fetch(handle, &entries);
7956 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
7957 				kpp->kp_free_stack[i] = (void *)entries[i];
7958 		}
7959 	}
7960 #endif
7961 #endif
7962 }
7963 #endif
7964 
7965 /********************************************************************
7966  *		Kmalloc subsystem
7967  *******************************************************************/
7968 
7969 static int __init setup_slub_min_order(const char *str, const struct kernel_param *kp)
7970 {
7971 	int ret;
7972 
7973 	ret = kstrtouint(str, 0, &slub_min_order);
7974 	if (ret)
7975 		return ret;
7976 
7977 	if (slub_min_order > slub_max_order)
7978 		slub_max_order = slub_min_order;
7979 
7980 	return 0;
7981 }
7982 
7983 static const struct kernel_param_ops param_ops_slab_min_order __initconst = {
7984 	.set = setup_slub_min_order,
7985 };
7986 __core_param_cb(slab_min_order, &param_ops_slab_min_order, &slub_min_order, 0);
7987 __core_param_cb(slub_min_order, &param_ops_slab_min_order, &slub_min_order, 0);
7988 
7989 static int __init setup_slub_max_order(const char *str, const struct kernel_param *kp)
7990 {
7991 	int ret;
7992 
7993 	ret = kstrtouint(str, 0, &slub_max_order);
7994 	if (ret)
7995 		return ret;
7996 
7997 	slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
7998 
7999 	if (slub_min_order > slub_max_order)
8000 		slub_min_order = slub_max_order;
8001 
8002 	return 0;
8003 }
8004 
8005 static const struct kernel_param_ops param_ops_slab_max_order __initconst = {
8006 	.set = setup_slub_max_order,
8007 };
8008 __core_param_cb(slab_max_order, &param_ops_slab_max_order, &slub_max_order, 0);
8009 __core_param_cb(slub_max_order, &param_ops_slab_max_order, &slub_max_order, 0);
8010 
8011 core_param(slab_min_objects, slub_min_objects, uint, 0);
8012 core_param(slub_min_objects, slub_min_objects, uint, 0);
8013 
8014 #ifdef CONFIG_NUMA
8015 static int __init setup_slab_strict_numa(const char *str, const struct kernel_param *kp)
8016 {
8017 	if (nr_node_ids > 1) {
8018 		static_branch_enable(&strict_numa);
8019 		pr_info("SLUB: Strict NUMA enabled.\n");
8020 	} else {
8021 		pr_warn("slab_strict_numa parameter set on non NUMA system.\n");
8022 	}
8023 
8024 	return 0;
8025 }
8026 
8027 static const struct kernel_param_ops param_ops_slab_strict_numa __initconst = {
8028 	.flags = KERNEL_PARAM_OPS_FL_NOARG,
8029 	.set = setup_slab_strict_numa,
8030 };
8031 __core_param_cb(slab_strict_numa, &param_ops_slab_strict_numa, NULL, 0);
8032 #endif
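/*
 * The parameters above are kernel command-line parameters; an illustrative
 * command line could be "slab_min_objects=16 slab_max_order=2
 * slab_strict_numa" (the slub_* spellings registered above are accepted as
 * aliases).
 */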
8033 
8034 
8035 #ifdef CONFIG_HARDENED_USERCOPY
8036 /*
8037  * Rejects incorrectly sized objects and objects that are to be copied
8038  * to/from userspace but do not fall entirely within the containing slab
8039  * cache's usercopy region.
8040  *
8041  * Returns normally if the check passes; otherwise the copy is rejected via
8042  * usercopy_abort(), reporting the name of the offending cache.
8043  */
8044 void __check_heap_object(const void *ptr, unsigned long n,
8045 			 const struct slab *slab, bool to_user)
8046 {
8047 	struct kmem_cache *s;
8048 	unsigned int offset;
8049 	bool is_kfence = is_kfence_address(ptr);
8050 
8051 	ptr = kasan_reset_tag(ptr);
8052 
8053 	/* Find object and usable object size. */
8054 	s = slab->slab_cache;
8055 
8056 	/* Reject impossible pointers. */
8057 	if (ptr < slab_address(slab))
8058 		usercopy_abort("SLUB object not in SLUB page?!", NULL,
8059 			       to_user, 0, n);
8060 
8061 	/* Find offset within object. */
8062 	if (is_kfence)
8063 		offset = ptr - kfence_object_start(ptr);
8064 	else
8065 		offset = (ptr - slab_address(slab)) % s->size;
8066 
8067 	/* Adjust for redzone and reject if within the redzone. */
8068 	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
8069 		if (offset < s->red_left_pad)
8070 			usercopy_abort("SLUB object in left red zone",
8071 				       s->name, to_user, offset, n);
8072 		offset -= s->red_left_pad;
8073 	}
8074 
8075 	/* Allow address range falling entirely within usercopy region. */
8076 	if (offset >= s->useroffset &&
8077 	    offset - s->useroffset <= s->usersize &&
8078 	    n <= s->useroffset - offset + s->usersize)
8079 		return;
8080 
8081 	usercopy_abort("SLUB object", s->name, to_user, offset, n);
8082 }
8083 #endif /* CONFIG_HARDENED_USERCOPY */
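/*
 * Illustrative check (hypothetical cache): with useroffset = 16 and
 * usersize = 64, copying n = 40 bytes starting at offset 32 within the
 * object passes, since 32 >= 16, 32 - 16 <= 64 and 40 <= 16 - 32 + 64 = 48;
 * a 56-byte copy from the same offset would exceed the usercopy region and
 * trigger usercopy_abort().
 */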
8084 
8085 #define SHRINK_PROMOTE_MAX 32
8086 
8087 /*
8088  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
8089  * up most to the head of the partial lists. New allocations will then
8090  * fill those up and thus they can be removed from the partial lists.
8091  *
8092  * The slabs with the least items are placed last. This results in them
8093  * being allocated from last, increasing the chance that the last objects
8094  * are freed in them.
8095  */
8096 static int __kmem_cache_do_shrink(struct kmem_cache *s)
8097 {
8098 	int node;
8099 	int i;
8100 	struct kmem_cache_node *n;
8101 	struct slab *slab;
8102 	struct slab *t;
8103 	struct list_head discard;
8104 	struct list_head promote[SHRINK_PROMOTE_MAX];
8105 	unsigned long flags;
8106 	int ret = 0;
8107 
8108 	for_each_kmem_cache_node(s, node, n) {
8109 		INIT_LIST_HEAD(&discard);
8110 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
8111 			INIT_LIST_HEAD(promote + i);
8112 
8113 		if (n->barn)
8114 			barn_shrink(s, n->barn);
8115 
8116 		spin_lock_irqsave(&n->list_lock, flags);
8117 
8118 		/*
8119 		 * Build lists of slabs to discard or promote.
8120 		 *
8121 		 * Note that concurrent frees may occur while we hold the
8122 		 * list_lock. slab->inuse here is the upper limit.
8123 		 */
8124 		list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
8125 			int free = slab->objects - slab->inuse;
8126 
8127 			/* Do not reread slab->inuse */
8128 			barrier();
8129 
8130 			/* We do not keep full slabs on the list */
8131 			BUG_ON(free <= 0);
8132 
8133 			if (free == slab->objects) {
8134 				list_move(&slab->slab_list, &discard);
8135 				slab_clear_node_partial(slab);
8136 				n->nr_partial--;
8137 				dec_slabs_node(s, node, slab->objects);
8138 			} else if (free <= SHRINK_PROMOTE_MAX)
8139 				list_move(&slab->slab_list, promote + free - 1);
8140 		}
8141 
8142 		/*
8143 		 * Promote the slabs filled up most to the head of the
8144 		 * partial list.
8145 		 */
8146 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
8147 			list_splice(promote + i, &n->partial);
8148 
8149 		spin_unlock_irqrestore(&n->list_lock, flags);
8150 
8151 		/* Release empty slabs */
8152 		list_for_each_entry_safe(slab, t, &discard, slab_list)
8153 			free_slab(s, slab);
8154 
8155 		if (node_nr_slabs(n))
8156 			ret = 1;
8157 	}
8158 
8159 	return ret;
8160 }
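/*
 * Illustrative outcome (hypothetical partial list): slabs with 1, 3 and 5
 * free objects are bucketed into promote[0], promote[2] and promote[4];
 * since the splice runs from the highest index down, the resulting partial
 * list starts with the fullest slab (1 free), then 3 free, then 5 free,
 * while completely free slabs were moved to the discard list.
 */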
8161 
8162 int __kmem_cache_shrink(struct kmem_cache *s)
8163 {
8164 	flush_all(s);
8165 	return __kmem_cache_do_shrink(s);
8166 }
8167 
8168 static int slab_mem_going_offline_callback(void)
8169 {
8170 	struct kmem_cache *s;
8171 
8172 	mutex_lock(&slab_mutex);
8173 	list_for_each_entry(s, &slab_caches, list) {
8174 		flush_all_cpus_locked(s);
8175 		__kmem_cache_do_shrink(s);
8176 	}
8177 	mutex_unlock(&slab_mutex);
8178 
8179 	return 0;
8180 }
8181 
8182 static int slab_mem_going_online_callback(int nid)
8183 {
8184 	struct kmem_cache_node *n;
8185 	struct kmem_cache *s;
8186 	int ret = 0;
8187 
8188 	/*
8189 	 * We are bringing a node online. No memory is available yet. We must
8190 	 * allocate a kmem_cache_node structure in order to bring the node
8191 	 * online.
8192 	 */
8193 	mutex_lock(&slab_mutex);
8194 	list_for_each_entry(s, &slab_caches, list) {
8195 		struct node_barn *barn = NULL;
8196 
8197 		/*
8198 		 * The structure may already exist if the node was previously
8199 		 * onlined and offlined.
8200 		 */
8201 		if (get_node(s, nid))
8202 			continue;
8203 
8204 		if (cache_has_sheaves(s)) {
8205 			barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, nid);
8206 
8207 			if (!barn) {
8208 				ret = -ENOMEM;
8209 				goto out;
8210 			}
8211 		}
8212 
8213 		/*
8214 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
8215 		 *      since memory is not yet available from the node that
8216 		 *      is brought up.
8217 		 */
8218 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
8219 		if (!n) {
8220 			kfree(barn);
8221 			ret = -ENOMEM;
8222 			goto out;
8223 		}
8224 
8225 		init_kmem_cache_node(n, barn);
8226 
8227 		s->node[nid] = n;
8228 	}
8229 	/*
8230 	 * Any cache created after this point will also have kmem_cache_node
8231 	 * initialized for the new node.
8232 	 */
8233 	node_set(nid, slab_nodes);
8234 out:
8235 	mutex_unlock(&slab_mutex);
8236 	return ret;
8237 }
8238 
8239 static int slab_memory_callback(struct notifier_block *self,
8240 				unsigned long action, void *arg)
8241 {
8242 	struct node_notify *nn = arg;
8243 	int nid = nn->nid;
8244 	int ret = 0;
8245 
8246 	switch (action) {
8247 	case NODE_ADDING_FIRST_MEMORY:
8248 		ret = slab_mem_going_online_callback(nid);
8249 		break;
8250 	case NODE_REMOVING_LAST_MEMORY:
8251 		ret = slab_mem_going_offline_callback();
8252 		break;
8253 	}
8254 	if (ret)
8255 		ret = notifier_from_errno(ret);
8256 	else
8257 		ret = NOTIFY_OK;
8258 	return ret;
8259 }
8260 
8261 /********************************************************************
8262  *			Basic setup of slabs
8263  *******************************************************************/
8264 
8265 /*
8266  * Used for early kmem_cache structures that were allocated using
8267  * the page allocator. Allocate them properly then fix up the pointers
8268  * that may be pointing to the wrong kmem_cache structure.
8269  */
8270 
8271 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
8272 {
8273 	int node;
8274 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
8275 	struct kmem_cache_node *n;
8276 
8277 	memcpy(s, static_cache, kmem_cache->object_size);
8278 
8279 	for_each_kmem_cache_node(s, node, n) {
8280 		struct slab *p;
8281 
8282 		list_for_each_entry(p, &n->partial, slab_list)
8283 			p->slab_cache = s;
8284 
8285 #ifdef CONFIG_SLUB_DEBUG
8286 		list_for_each_entry(p, &n->full, slab_list)
8287 			p->slab_cache = s;
8288 #endif
8289 	}
8290 	list_add(&s->list, &slab_caches);
8291 	return s;
8292 }
8293 
8294 /*
8295  * Finish the sheaves initialization done normally by init_percpu_sheaves() and
8296  * init_kmem_cache_nodes(). For normal kmalloc caches we have to bootstrap it
8297  * since sheaves and barns are allocated by kmalloc.
8298  */
8299 static void __init bootstrap_cache_sheaves(struct kmem_cache *s)
8300 {
8301 	struct kmem_cache_args empty_args = {};
8302 	unsigned int capacity;
8303 	bool failed = false;
8304 	int node, cpu;
8305 
8306 	capacity = calculate_sheaf_capacity(s, &empty_args);
8307 
8308 	/* capacity can be 0 due to debugging or SLUB_TINY */
8309 	if (!capacity)
8310 		return;
8311 
8312 	for_each_node_mask(node, slab_nodes) {
8313 		struct node_barn *barn;
8314 
8315 		barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
8316 
8317 		if (!barn) {
8318 			failed = true;
8319 			goto out;
8320 		}
8321 
8322 		barn_init(barn);
8323 		get_node(s, node)->barn = barn;
8324 	}
8325 
8326 	for_each_possible_cpu(cpu) {
8327 		struct slub_percpu_sheaves *pcs;
8328 
8329 		pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
8330 
8331 		pcs->main = __alloc_empty_sheaf(s, GFP_KERNEL, capacity);
8332 
8333 		if (!pcs->main) {
8334 			failed = true;
8335 			break;
8336 		}
8337 	}
8338 
8339 out:
8340 	/*
8341 	 * It's still early in boot, so treat this the same as a failure to
8342 	 * create the kmalloc cache in the first place.
8343 	 */
8344 	if (failed)
8345 		panic("Out of memory when creating kmem_cache %s\n", s->name);
8346 
8347 	s->sheaf_capacity = capacity;
8348 }
8349 
8350 static void __init bootstrap_kmalloc_sheaves(void)
8351 {
8352 	enum kmalloc_cache_type type;
8353 
8354 	for (type = KMALLOC_NORMAL; type <= KMALLOC_RANDOM_END; type++) {
8355 		for (int idx = 0; idx < KMALLOC_SHIFT_HIGH + 1; idx++) {
8356 			if (kmalloc_caches[type][idx])
8357 				bootstrap_cache_sheaves(kmalloc_caches[type][idx]);
8358 		}
8359 	}
8360 }
8361 
8362 void __init kmem_cache_init(void)
8363 {
8364 	static __initdata struct kmem_cache boot_kmem_cache,
8365 		boot_kmem_cache_node;
8366 	int node;
8367 
8368 	if (debug_guardpage_minorder())
8369 		slub_max_order = 0;
8370 
8371 	/* Inform pointer hashing choice about slub debugging state. */
8372 	hash_pointers_finalize(__slub_debug_enabled());
8373 
8374 	kmem_cache_node = &boot_kmem_cache_node;
8375 	kmem_cache = &boot_kmem_cache;
8376 
8377 	/*
8378 	 * Initialize the nodemask for which we will allocate per node
8379 	 * structures. We don't need to take slab_mutex here yet.
8380 	 */
8381 	for_each_node_state(node, N_MEMORY)
8382 		node_set(node, slab_nodes);
8383 
8384 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
8385 			sizeof(struct kmem_cache_node),
8386 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
8387 
8388 	hotplug_node_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
8389 
8390 	/* Able to allocate the per node structures */
8391 	slab_state = PARTIAL;
8392 
8393 	create_boot_cache(kmem_cache, "kmem_cache",
8394 			offsetof(struct kmem_cache, node) +
8395 				nr_node_ids * sizeof(struct kmem_cache_node *),
8396 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
8397 
8398 	kmem_cache = bootstrap(&boot_kmem_cache);
8399 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
8400 
8401 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
8402 	setup_kmalloc_cache_index_table();
8403 	create_kmalloc_caches();
8404 
8405 	bootstrap_kmalloc_sheaves();
8406 
8407 	/* Setup random freelists for each cache */
8408 	init_freelist_randomization();
8409 
8410 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
8411 				  slub_cpu_dead);
8412 
8413 	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
8414 		cache_line_size(),
8415 		slub_min_order, slub_max_order, slub_min_objects,
8416 		nr_cpu_ids, nr_node_ids);
8417 }
8418 
8419 void __init kmem_cache_init_late(void)
8420 {
8421 	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM | WQ_PERCPU,
8422 				  0);
8423 	WARN_ON(!flushwq);
8424 #ifdef CONFIG_SLAB_FREELIST_RANDOM
8425 	prandom_init_once(&slab_rnd_state);
8426 #endif
8427 }
8428 
8429 int do_kmem_cache_create(struct kmem_cache *s, const char *name,
8430 			 unsigned int size, struct kmem_cache_args *args,
8431 			 slab_flags_t flags)
8432 {
8433 	int err = -EINVAL;
8434 
8435 	s->name = name;
8436 	s->size = s->object_size = size;
8437 
8438 	s->flags = kmem_cache_flags(flags, s->name);
8439 #ifdef CONFIG_SLAB_FREELIST_HARDENED
8440 	s->random = get_random_long();
8441 #endif
8442 	s->align = args->align;
8443 	s->ctor = args->ctor;
8444 #ifdef CONFIG_HARDENED_USERCOPY
8445 	s->useroffset = args->useroffset;
8446 	s->usersize = args->usersize;
8447 #endif
8448 
8449 	if (!calculate_sizes(args, s))
8450 		goto out;
8451 	if (disable_higher_order_debug) {
8452 		/*
8453 		 * Disable debugging flags that store metadata if the min slab
8454 		 * order increased.
8455 		 */
8456 		if (get_order(s->size) > get_order(s->object_size)) {
8457 			s->flags &= ~DEBUG_METADATA_FLAGS;
8458 			s->offset = 0;
8459 			if (!calculate_sizes(args, s))
8460 				goto out;
8461 		}
8462 	}
8463 
8464 #ifdef system_has_freelist_aba
8465 	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
8466 		/* Enable fast mode */
8467 		s->flags |= __CMPXCHG_DOUBLE;
8468 	}
8469 #endif
8470 
8471 	/*
8472 	 * The larger the object size is, the more slabs we want on the partial
8473 	 * list to avoid pounding the page allocator excessively.
8474 	 */
8475 	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
8476 	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
8477 
8478 	s->cpu_sheaves = alloc_percpu(struct slub_percpu_sheaves);
8479 	if (!s->cpu_sheaves) {
8480 		err = -ENOMEM;
8481 		goto out;
8482 	}
8483 
8484 #ifdef CONFIG_NUMA
8485 	s->remote_node_defrag_ratio = 1000;
8486 #endif
8487 
8488 	/* Initialize the pre-computed randomized freelist if slab is up */
8489 	if (slab_state >= UP) {
8490 		if (init_cache_random_seq(s))
8491 			goto out;
8492 	}
8493 
8494 	if (!init_kmem_cache_nodes(s))
8495 		goto out;
8496 
8497 #ifdef CONFIG_SLUB_STATS
8498 	if (!alloc_kmem_cache_stats(s))
8499 		goto out;
8500 #endif
8501 
8502 	err = init_percpu_sheaves(s);
8503 	if (err)
8504 		goto out;
8505 
8506 	err = 0;
8507 
8508 	/* Mutex is not taken during early boot */
8509 	if (slab_state <= UP)
8510 		goto out;
8511 
8512 	/*
8513 	 * Failing to create sysfs files is not critical to SLUB functionality.
8514 	 * If it fails, proceed with cache creation without these files.
8515 	 */
8516 	if (sysfs_slab_add(s))
8517 		pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name);
8518 
8519 	if (s->flags & SLAB_STORE_USER)
8520 		debugfs_slab_add(s);
8521 
8522 out:
8523 	if (err)
8524 		__kmem_cache_release(s);
8525 	return err;
8526 }
8527 
8528 #ifdef SLAB_SUPPORTS_SYSFS
8529 static int count_inuse(struct slab *slab)
8530 {
8531 	return slab->inuse;
8532 }
8533 
8534 static int count_total(struct slab *slab)
8535 {
8536 	return slab->objects;
8537 }
8538 #endif
8539 
8540 #ifdef CONFIG_SLUB_DEBUG
8541 static void validate_slab(struct kmem_cache *s, struct slab *slab,
8542 			  unsigned long *obj_map)
8543 {
8544 	void *p;
8545 	void *addr = slab_address(slab);
8546 
8547 	if (!validate_slab_ptr(slab)) {
8548 		slab_err(s, slab, "Not a valid slab page");
8549 		return;
8550 	}
8551 
8552 	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
8553 		return;
8554 
8555 	/* Now we know that a valid freelist exists */
8556 	__fill_map(obj_map, s, slab);
8557 	for_each_object(p, s, addr, slab->objects) {
8558 		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
8559 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
8560 
8561 		if (!check_object(s, slab, p, val))
8562 			break;
8563 	}
8564 }
8565 
8566 static int validate_slab_node(struct kmem_cache *s,
8567 		struct kmem_cache_node *n, unsigned long *obj_map)
8568 {
8569 	unsigned long count = 0;
8570 	struct slab *slab;
8571 	unsigned long flags;
8572 
8573 	spin_lock_irqsave(&n->list_lock, flags);
8574 
8575 	list_for_each_entry(slab, &n->partial, slab_list) {
8576 		validate_slab(s, slab, obj_map);
8577 		count++;
8578 	}
8579 	if (count != n->nr_partial) {
8580 		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
8581 		       s->name, count, n->nr_partial);
8582 		slab_add_kunit_errors();
8583 	}
8584 
8585 	if (!(s->flags & SLAB_STORE_USER))
8586 		goto out;
8587 
8588 	list_for_each_entry(slab, &n->full, slab_list) {
8589 		validate_slab(s, slab, obj_map);
8590 		count++;
8591 	}
8592 	if (count != node_nr_slabs(n)) {
8593 		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
8594 		       s->name, count, node_nr_slabs(n));
8595 		slab_add_kunit_errors();
8596 	}
8597 
8598 out:
8599 	spin_unlock_irqrestore(&n->list_lock, flags);
8600 	return count;
8601 }
8602 
8603 long validate_slab_cache(struct kmem_cache *s)
8604 {
8605 	int node;
8606 	unsigned long count = 0;
8607 	struct kmem_cache_node *n;
8608 	unsigned long *obj_map;
8609 
8610 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
8611 	if (!obj_map)
8612 		return -ENOMEM;
8613 
8614 	flush_all(s);
8615 	for_each_kmem_cache_node(s, node, n)
8616 		count += validate_slab_node(s, n, obj_map);
8617 
8618 	bitmap_free(obj_map);
8619 
8620 	return count;
8621 }
8622 EXPORT_SYMBOL(validate_slab_cache);
8623 
8624 #ifdef CONFIG_DEBUG_FS
8625 /*
8626  * Generate lists of code addresses where slabcache objects are allocated
8627  * and freed.
8628  */
8629 
8630 struct location {
8631 	depot_stack_handle_t handle;
8632 	unsigned long count;
8633 	unsigned long addr;
8634 	unsigned long waste;
8635 	long long sum_time;
8636 	long min_time;
8637 	long max_time;
8638 	long min_pid;
8639 	long max_pid;
8640 	DECLARE_BITMAP(cpus, NR_CPUS);
8641 	nodemask_t nodes;
8642 };
8643 
8644 struct loc_track {
8645 	unsigned long max;
8646 	unsigned long count;
8647 	struct location *loc;
8648 	loff_t idx;
8649 };
8650 
8651 static struct dentry *slab_debugfs_root;
8652 
8653 static void free_loc_track(struct loc_track *t)
8654 {
8655 	if (t->max)
8656 		free_pages((unsigned long)t->loc,
8657 			get_order(sizeof(struct location) * t->max));
8658 }
8659 
8660 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
8661 {
8662 	struct location *l;
8663 	int order;
8664 
8665 	order = get_order(sizeof(struct location) * max);
8666 
8667 	l = (void *)__get_free_pages(flags, order);
8668 	if (!l)
8669 		return 0;
8670 
8671 	if (t->count) {
8672 		memcpy(l, t->loc, sizeof(struct location) * t->count);
8673 		free_loc_track(t);
8674 	}
8675 	t->max = max;
8676 	t->loc = l;
8677 	return 1;
8678 }
8679 
8680 static int add_location(struct loc_track *t, struct kmem_cache *s,
8681 				const struct track *track,
8682 				unsigned int orig_size)
8683 {
8684 	long start, end, pos;
8685 	struct location *l;
8686 	unsigned long caddr, chandle, cwaste;
8687 	unsigned long age = jiffies - track->when;
8688 	depot_stack_handle_t handle = 0;
8689 	unsigned int waste = s->object_size - orig_size;
8690 
8691 #ifdef CONFIG_STACKDEPOT
8692 	handle = READ_ONCE(track->handle);
8693 #endif
8694 	start = -1;
8695 	end = t->count;
8696 
8697 	for ( ; ; ) {
8698 		pos = start + (end - start + 1) / 2;
8699 
8700 		/*
8701 		 * There is nothing at "end". If we end up there
8702 		 * There is nothing at "end". If we end up there,
8703 		 * we need to insert something before end.
8704 		if (pos == end)
8705 			break;
8706 
8707 		l = &t->loc[pos];
8708 		caddr = l->addr;
8709 		chandle = l->handle;
8710 		cwaste = l->waste;
8711 		if ((track->addr == caddr) && (handle == chandle) &&
8712 			(waste == cwaste)) {
8713 
8714 			l->count++;
8715 			if (track->when) {
8716 				l->sum_time += age;
8717 				if (age < l->min_time)
8718 					l->min_time = age;
8719 				if (age > l->max_time)
8720 					l->max_time = age;
8721 
8722 				if (track->pid < l->min_pid)
8723 					l->min_pid = track->pid;
8724 				if (track->pid > l->max_pid)
8725 					l->max_pid = track->pid;
8726 
8727 				cpumask_set_cpu(track->cpu,
8728 						to_cpumask(l->cpus));
8729 			}
8730 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
8731 			return 1;
8732 		}
8733 
8734 		if (track->addr < caddr)
8735 			end = pos;
8736 		else if (track->addr == caddr && handle < chandle)
8737 			end = pos;
8738 		else if (track->addr == caddr && handle == chandle &&
8739 				waste < cwaste)
8740 			end = pos;
8741 		else
8742 			start = pos;
8743 	}
8744 
8745 	/*
8746 	 * Not found. Insert new tracking element.
8747 	 */
8748 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
8749 		return 0;
8750 
8751 	l = t->loc + pos;
8752 	if (pos < t->count)
8753 		memmove(l + 1, l,
8754 			(t->count - pos) * sizeof(struct location));
8755 	t->count++;
8756 	l->count = 1;
8757 	l->addr = track->addr;
8758 	l->sum_time = age;
8759 	l->min_time = age;
8760 	l->max_time = age;
8761 	l->min_pid = track->pid;
8762 	l->max_pid = track->pid;
8763 	l->handle = handle;
8764 	l->waste = waste;
8765 	cpumask_clear(to_cpumask(l->cpus));
8766 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
8767 	nodes_clear(l->nodes);
8768 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
8769 	return 1;
8770 }
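/*
 * The loc array is kept sorted by (addr, handle, waste), so the binary
 * search above either merges into an existing location with the exact same
 * key or finds the insertion point; e.g. two tracks from the same call site
 * but with different stack depot handles end up as separate locations.
 */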
8771 
8772 static void process_slab(struct loc_track *t, struct kmem_cache *s,
8773 		struct slab *slab, enum track_item alloc,
8774 		unsigned long *obj_map)
8775 {
8776 	void *addr = slab_address(slab);
8777 	bool is_alloc = (alloc == TRACK_ALLOC);
8778 	void *p;
8779 
8780 	__fill_map(obj_map, s, slab);
8781 
8782 	for_each_object(p, s, addr, slab->objects)
8783 		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
8784 			add_location(t, s, get_track(s, p, alloc),
8785 				     is_alloc ? get_orig_size(s, p) :
8786 						s->object_size);
8787 }
8788 #endif  /* CONFIG_DEBUG_FS   */
8789 #endif	/* CONFIG_SLUB_DEBUG */
8790 
8791 #ifdef SLAB_SUPPORTS_SYSFS
8792 enum slab_stat_type {
8793 	SL_ALL,			/* All slabs */
8794 	SL_PARTIAL,		/* Only partially allocated slabs */
8795 	SL_CPU,			/* Only slabs used for cpu caches */
8796 	SL_OBJECTS,		/* Determine allocated objects not slabs */
8797 	SL_TOTAL		/* Determine object capacity not slabs */
8798 };
8799 
8800 #define SO_ALL		(1 << SL_ALL)
8801 #define SO_PARTIAL	(1 << SL_PARTIAL)
8802 #define SO_CPU		(1 << SL_CPU)
8803 #define SO_OBJECTS	(1 << SL_OBJECTS)
8804 #define SO_TOTAL	(1 << SL_TOTAL)
8805 
8806 static ssize_t show_slab_objects(struct kmem_cache *s,
8807 				 char *buf, unsigned long flags)
8808 {
8809 	unsigned long total = 0;
8810 	int node;
8811 	int x;
8812 	unsigned long *nodes;
8813 	int len = 0;
8814 
8815 	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
8816 	if (!nodes)
8817 		return -ENOMEM;
8818 
8819 	/*
8820 	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
8821 	 * already held, as that would conflict with an existing lock order:
8822 	 *
8823 	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
8824 	 *
8825 	 * We don't really need mem_hotplug_lock (to hold off
8826 	 * slab_mem_going_offline_callback) here because slab's memory hot
8827 	 * unplug code doesn't destroy the kmem_cache->node[] data.
8828 	 */
8829 
8830 #ifdef CONFIG_SLUB_DEBUG
8831 	if (flags & SO_ALL) {
8832 		struct kmem_cache_node *n;
8833 
8834 		for_each_kmem_cache_node(s, node, n) {
8835 
8836 			if (flags & SO_TOTAL)
8837 				x = node_nr_objs(n);
8838 			else if (flags & SO_OBJECTS)
8839 				x = node_nr_objs(n) - count_partial(n, count_free);
8840 			else
8841 				x = node_nr_slabs(n);
8842 			total += x;
8843 			nodes[node] += x;
8844 		}
8845 
8846 	} else
8847 #endif
8848 	if (flags & SO_PARTIAL) {
8849 		struct kmem_cache_node *n;
8850 
8851 		for_each_kmem_cache_node(s, node, n) {
8852 			if (flags & SO_TOTAL)
8853 				x = count_partial(n, count_total);
8854 			else if (flags & SO_OBJECTS)
8855 				x = count_partial(n, count_inuse);
8856 			else
8857 				x = n->nr_partial;
8858 			total += x;
8859 			nodes[node] += x;
8860 		}
8861 	}
8862 
8863 	len += sysfs_emit_at(buf, len, "%lu", total);
8864 #ifdef CONFIG_NUMA
8865 	for (node = 0; node < nr_node_ids; node++) {
8866 		if (nodes[node])
8867 			len += sysfs_emit_at(buf, len, " N%d=%lu",
8868 					     node, nodes[node]);
8869 	}
8870 #endif
8871 	len += sysfs_emit_at(buf, len, "\n");
8872 	kfree(nodes);
8873 
8874 	return len;
8875 }
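/*
 * The emitted format is the total followed by per-node counts, e.g. reading
 * the "partial" attribute on a hypothetical two-node system could yield
 * "37 N0=20 N1=17".
 */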
8876 
8877 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
8878 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
8879 
8880 struct slab_attribute {
8881 	struct attribute attr;
8882 	ssize_t (*show)(struct kmem_cache *s, char *buf);
8883 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
8884 };
8885 
8886 #define SLAB_ATTR_RO(_name) \
8887 	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
8888 
8889 #define SLAB_ATTR(_name) \
8890 	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
8891 
8892 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
8893 {
8894 	return sysfs_emit(buf, "%u\n", s->size);
8895 }
8896 SLAB_ATTR_RO(slab_size);
8897 
8898 static ssize_t align_show(struct kmem_cache *s, char *buf)
8899 {
8900 	return sysfs_emit(buf, "%u\n", s->align);
8901 }
8902 SLAB_ATTR_RO(align);
8903 
8904 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
8905 {
8906 	return sysfs_emit(buf, "%u\n", s->object_size);
8907 }
8908 SLAB_ATTR_RO(object_size);
8909 
8910 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
8911 {
8912 	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
8913 }
8914 SLAB_ATTR_RO(objs_per_slab);
8915 
8916 static ssize_t order_show(struct kmem_cache *s, char *buf)
8917 {
8918 	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
8919 }
8920 SLAB_ATTR_RO(order);
8921 
8922 static ssize_t sheaf_capacity_show(struct kmem_cache *s, char *buf)
8923 {
8924 	return sysfs_emit(buf, "%u\n", s->sheaf_capacity);
8925 }
8926 SLAB_ATTR_RO(sheaf_capacity);
8927 
8928 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
8929 {
8930 	return sysfs_emit(buf, "%lu\n", s->min_partial);
8931 }
8932 
8933 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
8934 				 size_t length)
8935 {
8936 	unsigned long min;
8937 	int err;
8938 
8939 	err = kstrtoul(buf, 10, &min);
8940 	if (err)
8941 		return err;
8942 
8943 	s->min_partial = min;
8944 	return length;
8945 }
8946 SLAB_ATTR(min_partial);
8947 
8948 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
8949 {
8950 	return sysfs_emit(buf, "0\n");
8951 }
8952 
8953 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
8954 				 size_t length)
8955 {
8956 	unsigned int objects;
8957 	int err;
8958 
8959 	err = kstrtouint(buf, 10, &objects);
8960 	if (err)
8961 		return err;
8962 	if (objects)
8963 		return -EINVAL;
8964 
8965 	return length;
8966 }
8967 SLAB_ATTR(cpu_partial);
8968 
8969 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
8970 {
8971 	if (!s->ctor)
8972 		return 0;
8973 	return sysfs_emit(buf, "%pS\n", s->ctor);
8974 }
8975 SLAB_ATTR_RO(ctor);
8976 
8977 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
8978 {
8979 	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
8980 }
8981 SLAB_ATTR_RO(aliases);
8982 
8983 static ssize_t partial_show(struct kmem_cache *s, char *buf)
8984 {
8985 	return show_slab_objects(s, buf, SO_PARTIAL);
8986 }
8987 SLAB_ATTR_RO(partial);
8988 
8989 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
8990 {
8991 	return show_slab_objects(s, buf, SO_CPU);
8992 }
8993 SLAB_ATTR_RO(cpu_slabs);
8994 
8995 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
8996 {
8997 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
8998 }
8999 SLAB_ATTR_RO(objects_partial);
9000 
9001 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
9002 {
9003 	return sysfs_emit(buf, "0(0)\n");
9004 }
9005 SLAB_ATTR_RO(slabs_cpu_partial);
9006 
9007 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
9008 {
9009 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
9010 }
9011 SLAB_ATTR_RO(reclaim_account);
9012 
9013 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
9014 {
9015 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
9016 }
9017 SLAB_ATTR_RO(hwcache_align);
9018 
9019 #ifdef CONFIG_ZONE_DMA
9020 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
9021 {
9022 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
9023 }
9024 SLAB_ATTR_RO(cache_dma);
9025 #endif
9026 
9027 #ifdef CONFIG_HARDENED_USERCOPY
9028 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
9029 {
9030 	return sysfs_emit(buf, "%u\n", s->usersize);
9031 }
9032 SLAB_ATTR_RO(usersize);
9033 #endif
9034 
9035 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
9036 {
9037 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
9038 }
9039 SLAB_ATTR_RO(destroy_by_rcu);
9040 
9041 #ifdef CONFIG_SLUB_DEBUG
9042 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
9043 {
9044 	return show_slab_objects(s, buf, SO_ALL);
9045 }
9046 SLAB_ATTR_RO(slabs);
9047 
9048 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
9049 {
9050 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
9051 }
9052 SLAB_ATTR_RO(total_objects);
9053 
9054 static ssize_t objects_show(struct kmem_cache *s, char *buf)
9055 {
9056 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
9057 }
9058 SLAB_ATTR_RO(objects);
9059 
9060 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
9061 {
9062 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
9063 }
9064 SLAB_ATTR_RO(sanity_checks);
9065 
9066 static ssize_t trace_show(struct kmem_cache *s, char *buf)
9067 {
9068 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
9069 }
9070 SLAB_ATTR_RO(trace);
9071 
9072 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
9073 {
9074 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
9075 }
9076 
9077 SLAB_ATTR_RO(red_zone);
9078 
9079 static ssize_t poison_show(struct kmem_cache *s, char *buf)
9080 {
9081 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
9082 }
9083 
9084 SLAB_ATTR_RO(poison);
9085 
9086 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
9087 {
9088 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
9089 }
9090 
9091 SLAB_ATTR_RO(store_user);
9092 
9093 static ssize_t validate_show(struct kmem_cache *s, char *buf)
9094 {
9095 	return 0;
9096 }
9097 
9098 static ssize_t validate_store(struct kmem_cache *s,
9099 			const char *buf, size_t length)
9100 {
9101 	int ret = -EINVAL;
9102 
9103 	if (buf[0] == '1' && kmem_cache_debug(s)) {
9104 		ret = validate_slab_cache(s);
9105 		if (ret >= 0)
9106 			ret = length;
9107 	}
9108 	return ret;
9109 }
9110 SLAB_ATTR(validate);
9111 
9112 #endif /* CONFIG_SLUB_DEBUG */
9113 
9114 #ifdef CONFIG_FAILSLAB
9115 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
9116 {
9117 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
9118 }
9119 
9120 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
9121 				size_t length)
9122 {
9123 	if (s->refcount > 1)
9124 		return -EINVAL;
9125 
9126 	if (buf[0] == '1')
9127 		WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
9128 	else
9129 		WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
9130 
9131 	return length;
9132 }
9133 SLAB_ATTR(failslab);
9134 #endif
9135 
9136 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
9137 {
9138 	return 0;
9139 }
9140 
9141 static ssize_t shrink_store(struct kmem_cache *s,
9142 			const char *buf, size_t length)
9143 {
9144 	if (buf[0] == '1')
9145 		kmem_cache_shrink(s);
9146 	else
9147 		return -EINVAL;
9148 	return length;
9149 }
9150 SLAB_ATTR(shrink);
9151 
9152 #ifdef CONFIG_NUMA
9153 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
9154 {
9155 	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
9156 }
9157 
9158 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
9159 				const char *buf, size_t length)
9160 {
9161 	unsigned int ratio;
9162 	int err;
9163 
9164 	err = kstrtouint(buf, 10, &ratio);
9165 	if (err)
9166 		return err;
9167 	if (ratio > 100)
9168 		return -ERANGE;
9169 
9170 	s->remote_node_defrag_ratio = ratio * 10;
9171 
9172 	return length;
9173 }
9174 SLAB_ATTR(remote_node_defrag_ratio);
9175 #endif
9176 
9177 #ifdef CONFIG_SLUB_STATS
9178 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
9179 {
9180 	unsigned long sum  = 0;
9181 	int cpu;
9182 	int len = 0;
9183 	int *data = kmalloc_objs(int, nr_cpu_ids);
9184 
9185 	if (!data)
9186 		return -ENOMEM;
9187 
9188 	for_each_online_cpu(cpu) {
9189 		unsigned int x = per_cpu_ptr(s->cpu_stats, cpu)->stat[si];
9190 
9191 		data[cpu] = x;
9192 		sum += x;
9193 	}
9194 
9195 	len += sysfs_emit_at(buf, len, "%lu", sum);
9196 
9197 #ifdef CONFIG_SMP
9198 	for_each_online_cpu(cpu) {
9199 		if (data[cpu])
9200 			len += sysfs_emit_at(buf, len, " C%d=%u",
9201 					     cpu, data[cpu]);
9202 	}
9203 #endif
9204 	kfree(data);
9205 	len += sysfs_emit_at(buf, len, "\n");
9206 
9207 	return len;
9208 }
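/*
 * The emitted format is the sum followed by nonzero per-cpu counts, e.g. a
 * hypothetical two-CPU reading of alloc_fastpath could be
 * "8423 C0=4210 C1=4213".
 */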
9209 
9210 static void clear_stat(struct kmem_cache *s, enum stat_item si)
9211 {
9212 	int cpu;
9213 
9214 	for_each_online_cpu(cpu)
9215 		per_cpu_ptr(s->cpu_stats, cpu)->stat[si] = 0;
9216 }
9217 
9218 #define STAT_ATTR(si, text) 					\
9219 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
9220 {								\
9221 	return show_stat(s, buf, si);				\
9222 }								\
9223 static ssize_t text##_store(struct kmem_cache *s,		\
9224 				const char *buf, size_t length)	\
9225 {								\
9226 	if (buf[0] != '0')					\
9227 		return -EINVAL;					\
9228 	clear_stat(s, si);					\
9229 	return length;						\
9230 }								\
9231 SLAB_ATTR(text);						\
9232 
9233 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
9234 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
9235 STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf);
9236 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
9237 STAT_ATTR(FREE_FASTPATH, free_fastpath);
9238 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
9239 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
9240 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
9241 STAT_ATTR(ALLOC_SLAB, alloc_slab);
9242 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
9243 STAT_ATTR(FREE_SLAB, free_slab);
9244 STAT_ATTR(ORDER_FALLBACK, order_fallback);
9245 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
9246 STAT_ATTR(SHEAF_FLUSH, sheaf_flush);
9247 STAT_ATTR(SHEAF_REFILL, sheaf_refill);
9248 STAT_ATTR(SHEAF_ALLOC, sheaf_alloc);
9249 STAT_ATTR(SHEAF_FREE, sheaf_free);
9250 STAT_ATTR(BARN_GET, barn_get);
9251 STAT_ATTR(BARN_GET_FAIL, barn_get_fail);
9252 STAT_ATTR(BARN_PUT, barn_put);
9253 STAT_ATTR(BARN_PUT_FAIL, barn_put_fail);
9254 STAT_ATTR(SHEAF_PREFILL_FAST, sheaf_prefill_fast);
9255 STAT_ATTR(SHEAF_PREFILL_SLOW, sheaf_prefill_slow);
9256 STAT_ATTR(SHEAF_PREFILL_OVERSIZE, sheaf_prefill_oversize);
9257 STAT_ATTR(SHEAF_RETURN_FAST, sheaf_return_fast);
9258 STAT_ATTR(SHEAF_RETURN_SLOW, sheaf_return_slow);
9259 #endif	/* CONFIG_SLUB_STATS */
9260 
9261 #ifdef CONFIG_KFENCE
9262 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
9263 {
9264 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
9265 }
9266 
9267 static ssize_t skip_kfence_store(struct kmem_cache *s,
9268 			const char *buf, size_t length)
9269 {
9270 	int ret = length;
9271 
9272 	if (buf[0] == '0')
9273 		s->flags &= ~SLAB_SKIP_KFENCE;
9274 	else if (buf[0] == '1')
9275 		s->flags |= SLAB_SKIP_KFENCE;
9276 	else
9277 		ret = -EINVAL;
9278 
9279 	return ret;
9280 }
9281 SLAB_ATTR(skip_kfence);
9282 #endif
9283 
9284 static struct attribute *slab_attrs[] = {
9285 	&slab_size_attr.attr,
9286 	&object_size_attr.attr,
9287 	&objs_per_slab_attr.attr,
9288 	&order_attr.attr,
9289 	&sheaf_capacity_attr.attr,
9290 	&min_partial_attr.attr,
9291 	&cpu_partial_attr.attr,
9292 	&objects_partial_attr.attr,
9293 	&partial_attr.attr,
9294 	&cpu_slabs_attr.attr,
9295 	&ctor_attr.attr,
9296 	&aliases_attr.attr,
9297 	&align_attr.attr,
9298 	&hwcache_align_attr.attr,
9299 	&reclaim_account_attr.attr,
9300 	&destroy_by_rcu_attr.attr,
9301 	&shrink_attr.attr,
9302 	&slabs_cpu_partial_attr.attr,
9303 #ifdef CONFIG_SLUB_DEBUG
9304 	&total_objects_attr.attr,
9305 	&objects_attr.attr,
9306 	&slabs_attr.attr,
9307 	&sanity_checks_attr.attr,
9308 	&trace_attr.attr,
9309 	&red_zone_attr.attr,
9310 	&poison_attr.attr,
9311 	&store_user_attr.attr,
9312 	&validate_attr.attr,
9313 #endif
9314 #ifdef CONFIG_ZONE_DMA
9315 	&cache_dma_attr.attr,
9316 #endif
9317 #ifdef CONFIG_NUMA
9318 	&remote_node_defrag_ratio_attr.attr,
9319 #endif
9320 #ifdef CONFIG_SLUB_STATS
9321 	&alloc_fastpath_attr.attr,
9322 	&alloc_slowpath_attr.attr,
9323 	&free_rcu_sheaf_attr.attr,
9324 	&free_rcu_sheaf_fail_attr.attr,
9325 	&free_fastpath_attr.attr,
9326 	&free_slowpath_attr.attr,
9327 	&free_add_partial_attr.attr,
9328 	&free_remove_partial_attr.attr,
9329 	&alloc_slab_attr.attr,
9330 	&alloc_node_mismatch_attr.attr,
9331 	&free_slab_attr.attr,
9332 	&order_fallback_attr.attr,
9333 	&cmpxchg_double_fail_attr.attr,
9334 	&sheaf_flush_attr.attr,
9335 	&sheaf_refill_attr.attr,
9336 	&sheaf_alloc_attr.attr,
9337 	&sheaf_free_attr.attr,
9338 	&barn_get_attr.attr,
9339 	&barn_get_fail_attr.attr,
9340 	&barn_put_attr.attr,
9341 	&barn_put_fail_attr.attr,
9342 	&sheaf_prefill_fast_attr.attr,
9343 	&sheaf_prefill_slow_attr.attr,
9344 	&sheaf_prefill_oversize_attr.attr,
9345 	&sheaf_return_fast_attr.attr,
9346 	&sheaf_return_slow_attr.attr,
9347 #endif
9348 #ifdef CONFIG_FAILSLAB
9349 	&failslab_attr.attr,
9350 #endif
9351 #ifdef CONFIG_HARDENED_USERCOPY
9352 	&usersize_attr.attr,
9353 #endif
9354 #ifdef CONFIG_KFENCE
9355 	&skip_kfence_attr.attr,
9356 #endif
9357 
9358 	NULL
9359 };
9360 
9361 static const struct attribute_group slab_attr_group = {
9362 	.attrs = slab_attrs,
9363 };
9364 
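/*
 * Generic sysfs ->show()/->store() dispatchers: resolve the slab_attribute
 * and kmem_cache behind the kobject and call the per-attribute handler, or
 * return -EIO if the attribute does not implement the operation.
 */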
9365 static ssize_t slab_attr_show(struct kobject *kobj,
9366 				struct attribute *attr,
9367 				char *buf)
9368 {
9369 	struct slab_attribute *attribute;
9370 	struct kmem_cache *s;
9371 
9372 	attribute = to_slab_attr(attr);
9373 	s = to_slab(kobj);
9374 
9375 	if (!attribute->show)
9376 		return -EIO;
9377 
9378 	return attribute->show(s, buf);
9379 }
9380 
9381 static ssize_t slab_attr_store(struct kobject *kobj,
9382 				struct attribute *attr,
9383 				const char *buf, size_t len)
9384 {
9385 	struct slab_attribute *attribute;
9386 	struct kmem_cache *s;
9387 
9388 	attribute = to_slab_attr(attr);
9389 	s = to_slab(kobj);
9390 
9391 	if (!attribute->store)
9392 		return -EIO;
9393 
9394 	return attribute->store(s, buf, len);
9395 }
9396 
9397 static void kmem_cache_release(struct kobject *k)
9398 {
9399 	slab_kmem_cache_release(to_slab(k));
9400 }
9401 
9402 static const struct sysfs_ops slab_sysfs_ops = {
9403 	.show = slab_attr_show,
9404 	.store = slab_attr_store,
9405 };
9406 
9407 static const struct kobj_type slab_ktype = {
9408 	.sysfs_ops = &slab_sysfs_ops,
9409 	.release = kmem_cache_release,
9410 };
9411 
9412 static struct kset *slab_kset;
9413 
9414 static inline struct kset *cache_kset(struct kmem_cache *s)
9415 {
9416 	return slab_kset;
9417 }
9418 
9419 #define ID_STR_LENGTH 32
9420 
9421 /* Create a unique string id for a slab cache:
9422  *
9423  * Format	:[flags-]size
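 *
 * e.g. a cache with SLAB_ACCOUNT set and s->size == 192 (and none of the
 * other merge-relevant flags below) would get the id ":A-0000192".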
9424  */
9425 static char *create_unique_id(struct kmem_cache *s)
9426 {
9427 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
9428 	char *p = name;
9429 
9430 	if (!name)
9431 		return ERR_PTR(-ENOMEM);
9432 
9433 	*p++ = ':';
9434 	/*
9435 	 * First flags affecting slabcache operations. We will only
9436 	 * get here for aliasable slabs so we do not need to support
9437 	 * too many flags. The flags here must cover all flags that
9438 	 * are matched during merging to guarantee that the id is
9439 	 * unique.
9440 	 */
9441 	if (s->flags & SLAB_CACHE_DMA)
9442 		*p++ = 'd';
9443 	if (s->flags & SLAB_CACHE_DMA32)
9444 		*p++ = 'D';
9445 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
9446 		*p++ = 'a';
9447 	if (s->flags & SLAB_CONSISTENCY_CHECKS)
9448 		*p++ = 'F';
9449 	if (s->flags & SLAB_ACCOUNT)
9450 		*p++ = 'A';
9451 	if (p != name + 1)
9452 		*p++ = '-';
9453 	p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
9454 
9455 	if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
9456 		kfree(name);
9457 		return ERR_PTR(-EINVAL);
9458 	}
9459 	kmsan_unpoison_memory(name, p - name);
9460 	return name;
9461 }
9462 
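/*
 * Register a cache in sysfs. Unmergeable caches use their proper name for
 * the /sys/kernel/slab/<name> directory; mergeable caches get a unique
 * ":<flags>-<size>" id as the directory name, with the proper name added
 * as a symlink alias to it.
 */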
9463 static int sysfs_slab_add(struct kmem_cache *s)
9464 {
9465 	int err;
9466 	const char *name;
9467 	struct kset *kset = cache_kset(s);
9468 	int unmergeable = slab_unmergeable(s);
9469 
9470 	if (!unmergeable && disable_higher_order_debug &&
9471 			(slub_debug & DEBUG_METADATA_FLAGS))
9472 		unmergeable = 1;
9473 
9474 	if (unmergeable) {
9475 		/*
9476 		 * This cache can never be merged, so we can use its name
9477 		 * directly. That is typically the case with debugging enabled,
9478 		 * and it makes duplicate names easy to catch.
9479 		 */
9480 		sysfs_remove_link(&slab_kset->kobj, s->name);
9481 		name = s->name;
9482 	} else {
9483 		/*
9484 		 * Create a unique name for the slab as a target
9485 		 * for the symlinks.
9486 		 */
9487 		name = create_unique_id(s);
9488 		if (IS_ERR(name))
9489 			return PTR_ERR(name);
9490 	}
9491 
9492 	s->kobj.kset = kset;
9493 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
9494 	if (err)
9495 		goto out;
9496 
9497 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
9498 	if (err)
9499 		goto out_del_kobj;
9500 
9501 	if (!unmergeable) {
9502 		/* Setup first alias */
9503 		sysfs_slab_alias(s, s->name);
9504 	}
9505 out:
9506 	if (!unmergeable)
9507 		kfree(name);
9508 	return err;
9509 out_del_kobj:
9510 	kobject_del(&s->kobj);
9511 	goto out;
9512 }
9513 
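/*
 * sysfs_slab_unlink() removes the cache's sysfs directory right away;
 * sysfs_slab_release() drops the kobject reference so that the cache can be
 * freed through kmem_cache_release() once the last reference is gone.
 */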
9514 void sysfs_slab_unlink(struct kmem_cache *s)
9515 {
9516 	if (s->kobj.state_in_sysfs)
9517 		kobject_del(&s->kobj);
9518 }
9519 
9520 void sysfs_slab_release(struct kmem_cache *s)
9521 {
9522 	kobject_put(&s->kobj);
9523 }
9524 
9525 /*
9526  * Need to buffer aliases during bootup until sysfs becomes
9527  * available lest we lose that information.
9528  */
9529 struct saved_alias {
9530 	struct kmem_cache *s;
9531 	const char *name;
9532 	struct saved_alias *next;
9533 };
9534 
9535 static struct saved_alias *alias_list;
9536 
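/*
 * Create a sysfs symlink from an alias name to the cache's directory, or
 * queue the alias on alias_list if sysfs is not up yet (slab_state != FULL).
 */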
9537 int sysfs_slab_alias(struct kmem_cache *s, const char *name)
9538 {
9539 	struct saved_alias *al;
9540 
9541 	if (slab_state == FULL) {
9542 		/*
9543 		 * If we have a leftover link then remove it.
9544 		 */
9545 		sysfs_remove_link(&slab_kset->kobj, name);
9546 		/*
9547 		 * The original cache may have failed to be added to sysfs.
9548 		 * In that case, sysfs_create_link() returns -ENOENT and
9549 		 * symbolic link creation is skipped.
9550 		 */
9551 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
9552 	}
9553 
9554 	al = kmalloc_obj(struct saved_alias);
9555 	if (!al)
9556 		return -ENOMEM;
9557 
9558 	al->s = s;
9559 	al->name = name;
9560 	al->next = alias_list;
9561 	alias_list = al;
9562 	kmsan_unpoison_memory(al, sizeof(*al));
9563 	return 0;
9564 }
9565 
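/*
 * Late initcall: create the /sys/kernel/slab kset, add all caches created
 * during boot, and replay the aliases queued before sysfs became available.
 */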
9566 static int __init slab_sysfs_init(void)
9567 {
9568 	struct kmem_cache *s;
9569 	int err;
9570 
9571 	mutex_lock(&slab_mutex);
9572 
9573 	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
9574 	if (!slab_kset) {
9575 		mutex_unlock(&slab_mutex);
9576 		pr_err("Cannot register slab subsystem.\n");
9577 		return -ENOMEM;
9578 	}
9579 
9580 	slab_state = FULL;
9581 
9582 	list_for_each_entry(s, &slab_caches, list) {
9583 		err = sysfs_slab_add(s);
9584 		if (err)
9585 			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
9586 			       s->name);
9587 	}
9588 
9589 	while (alias_list) {
9590 		struct saved_alias *al = alias_list;
9591 
9592 		alias_list = alias_list->next;
9593 		err = sysfs_slab_alias(al->s, al->name);
9594 		if (err)
9595 			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
9596 			       al->name);
9597 		kfree(al);
9598 	}
9599 
9600 	mutex_unlock(&slab_mutex);
9601 	return 0;
9602 }
9603 late_initcall(slab_sysfs_init);
9604 #endif /* SLAB_SUPPORTS_SYSFS */
9605 
9606 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
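/*
 * Show one location record of alloc_traces/free_traces. A record looks
 * roughly like this (illustrative line, fields appear only when available):
 *
 *	1234 copy_process+0x1f4/0x1a40 age=103/1511/4023 pid=1-123 cpus=0-3
 *
 * followed by the saved stack trace when stack depot is enabled.
 */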
9607 static int slab_debugfs_show(struct seq_file *seq, void *v)
9608 {
9609 	struct loc_track *t = seq->private;
9610 	struct location *l;
9611 	unsigned long idx;
9612 
9613 	idx = (unsigned long) t->idx;
9614 	if (idx < t->count) {
9615 		l = &t->loc[idx];
9616 
9617 		seq_printf(seq, "%7ld ", l->count);
9618 
9619 		if (l->addr)
9620 			seq_printf(seq, "%pS", (void *)l->addr);
9621 		else
9622 			seq_puts(seq, "<not-available>");
9623 
9624 		if (l->waste)
9625 			seq_printf(seq, " waste=%lu/%lu",
9626 				l->count * l->waste, l->waste);
9627 
9628 		if (l->sum_time != l->min_time) {
9629 			seq_printf(seq, " age=%ld/%llu/%ld",
9630 				l->min_time, div_u64(l->sum_time, l->count),
9631 				l->max_time);
9632 		} else
9633 			seq_printf(seq, " age=%ld", l->min_time);
9634 
9635 		if (l->min_pid != l->max_pid)
9636 			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
9637 		else
9638 			seq_printf(seq, " pid=%ld", l->min_pid);
9640 
9641 		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
9642 			seq_printf(seq, " cpus=%*pbl",
9643 				 cpumask_pr_args(to_cpumask(l->cpus)));
9644 
9645 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
9646 			seq_printf(seq, " nodes=%*pbl",
9647 				 nodemask_pr_args(&l->nodes));
9648 
9649 #ifdef CONFIG_STACKDEPOT
9650 		{
9651 			depot_stack_handle_t handle;
9652 			unsigned long *entries;
9653 			unsigned int nr_entries, j;
9654 
9655 			handle = READ_ONCE(l->handle);
9656 			if (handle) {
9657 				nr_entries = stack_depot_fetch(handle, &entries);
9658 				seq_puts(seq, "\n");
9659 				for (j = 0; j < nr_entries; j++)
9660 					seq_printf(seq, "        %pS\n", (void *)entries[j]);
9661 			}
9662 		}
9663 #endif
9664 		seq_puts(seq, "\n");
9665 	}
9666 
9667 	if (!idx && !t->count)
9668 		seq_puts(seq, "No data\n");
9669 
9670 	return 0;
9671 }
9672 
9673 static void slab_debugfs_stop(struct seq_file *seq, void *v)
9674 {
9675 }
9676 
9677 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
9678 {
9679 	struct loc_track *t = seq->private;
9680 
9681 	t->idx = ++(*ppos);
9682 	if (*ppos <= t->count)
9683 		return ppos;
9684 
9685 	return NULL;
9686 }
9687 
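/* Sort comparator: order locations by descending count. */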
9688 static int cmp_loc_by_count(const void *a, const void *b)
9689 {
9690 	struct location *loc1 = (struct location *)a;
9691 	struct location *loc2 = (struct location *)b;
9692 
9693 	return cmp_int(loc2->count, loc1->count);
9694 }
9695 
9696 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
9697 {
9698 	struct loc_track *t = seq->private;
9699 
9700 	t->idx = *ppos;
9701 	return ppos;
9702 }
9703 
9704 static const struct seq_operations slab_debugfs_sops = {
9705 	.start  = slab_debugfs_start,
9706 	.next   = slab_debugfs_next,
9707 	.stop   = slab_debugfs_stop,
9708 	.show   = slab_debugfs_show,
9709 };
9710 
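/*
 * Open handler for alloc_traces/free_traces: walk all partial and full slabs
 * of the cache under the per-node list_lock, collect the stored tracks into
 * a loc_track table and sort the locations by descending count.
 */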
9711 static int slab_debug_trace_open(struct inode *inode, struct file *filep)
9712 {
9714 	struct kmem_cache_node *n;
9715 	enum track_item alloc;
9716 	int node;
9717 	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
9718 						sizeof(struct loc_track));
9719 	struct kmem_cache *s = file_inode(filep)->i_private;
9720 	unsigned long *obj_map;
9721 
9722 	if (!t)
9723 		return -ENOMEM;
9724 
9725 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
9726 	if (!obj_map) {
9727 		seq_release_private(inode, filep);
9728 		return -ENOMEM;
9729 	}
9730 
9731 	alloc = debugfs_get_aux_num(filep);
9732 
9733 	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
9734 		bitmap_free(obj_map);
9735 		seq_release_private(inode, filep);
9736 		return -ENOMEM;
9737 	}
9738 
9739 	for_each_kmem_cache_node(s, node, n) {
9740 		unsigned long flags;
9741 		struct slab *slab;
9742 
9743 		if (!node_nr_slabs(n))
9744 			continue;
9745 
9746 		spin_lock_irqsave(&n->list_lock, flags);
9747 		list_for_each_entry(slab, &n->partial, slab_list)
9748 			process_slab(t, s, slab, alloc, obj_map);
9749 		list_for_each_entry(slab, &n->full, slab_list)
9750 			process_slab(t, s, slab, alloc, obj_map);
9751 		spin_unlock_irqrestore(&n->list_lock, flags);
9752 	}
9753 
9754 	/* Sort locations by count */
9755 	sort(t->loc, t->count, sizeof(struct location),
9756 	     cmp_loc_by_count, NULL);
9757 
9758 	bitmap_free(obj_map);
9759 	return 0;
9760 }
9761 
9762 static int slab_debug_trace_release(struct inode *inode, struct file *file)
9763 {
9764 	struct seq_file *seq = file->private_data;
9765 	struct loc_track *t = seq->private;
9766 
9767 	free_loc_track(t);
9768 	return seq_release_private(inode, file);
9769 }
9770 
9771 static const struct file_operations slab_debugfs_fops = {
9772 	.open    = slab_debug_trace_open,
9773 	.read    = seq_read,
9774 	.llseek  = seq_lseek,
9775 	.release = slab_debug_trace_release,
9776 };
9777 
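/*
 * Create the per-cache debugfs directory with its "alloc_traces" and
 * "free_traces" files under /sys/kernel/debug/slab/<cache>/.
 */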
9778 static void debugfs_slab_add(struct kmem_cache *s)
9779 {
9780 	struct dentry *slab_cache_dir;
9781 
9782 	if (unlikely(!slab_debugfs_root))
9783 		return;
9784 
9785 	slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
9786 
9787 	debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s,
9788 					TRACK_ALLOC, &slab_debugfs_fops);
9789 
9790 	debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s,
9791 					TRACK_FREE, &slab_debugfs_fops);
9792 }
9793 
9794 void debugfs_slab_release(struct kmem_cache *s)
9795 {
9796 	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
9797 }
9798 
9799 static int __init slab_debugfs_init(void)
9800 {
9801 	struct kmem_cache *s;
9802 
9803 	slab_debugfs_root = debugfs_create_dir("slab", NULL);
9804 
9805 	list_for_each_entry(s, &slab_caches, list)
9806 		if (s->flags & SLAB_STORE_USER)
9807 			debugfs_slab_add(s);
9808 
9809 	return 0;
9811 }
9812 __initcall(slab_debugfs_init);
9813 #endif
9814 /*
9815  * The /proc/slabinfo ABI
9816  */
9817 #ifdef CONFIG_SLUB_DEBUG
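/*
 * Fill in /proc/slabinfo counters for a cache. The free object count comes
 * from count_partial_free_approx(), so active_objs is an estimate rather
 * than an exact number.
 */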
9818 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
9819 {
9820 	unsigned long nr_slabs = 0;
9821 	unsigned long nr_objs = 0;
9822 	unsigned long nr_free = 0;
9823 	int node;
9824 	struct kmem_cache_node *n;
9825 
9826 	for_each_kmem_cache_node(s, node, n) {
9827 		nr_slabs += node_nr_slabs(n);
9828 		nr_objs += node_nr_objs(n);
9829 		nr_free += count_partial_free_approx(n);
9830 	}
9831 
9832 	sinfo->active_objs = nr_objs - nr_free;
9833 	sinfo->num_objs = nr_objs;
9834 	sinfo->active_slabs = nr_slabs;
9835 	sinfo->num_slabs = nr_slabs;
9836 	sinfo->objects_per_slab = oo_objects(s->oo);
9837 	sinfo->cache_order = oo_order(s->oo);
9838 }
9839 #endif /* CONFIG_SLUB_DEBUG */
9840