xref: /linux/mm/slub.c (revision 5c1672705a1a2389f5ad78e0fea6f08ed32d6f18)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SLUB: A slab allocator that limits cache line use instead of queuing
4  * objects in per cpu and per node lists.
5  *
6  * The allocator synchronizes using per slab locks or atomic operations
7  * and only uses a centralized lock to manage a pool of partial slabs.
8  *
9  * (C) 2007 SGI, Christoph Lameter
10  * (C) 2011 Linux Foundation, Christoph Lameter
11  */
12 
13 #include <linux/mm.h>
14 #include <linux/swap.h> /* mm_account_reclaimed_pages() */
15 #include <linux/module.h>
16 #include <linux/bit_spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/swab.h>
19 #include <linux/bitops.h>
20 #include <linux/slab.h>
21 #include "slab.h"
22 #include <linux/proc_fs.h>
23 #include <linux/seq_file.h>
24 #include <linux/kasan.h>
25 #include <linux/kmsan.h>
26 #include <linux/cpu.h>
27 #include <linux/cpuset.h>
28 #include <linux/mempolicy.h>
29 #include <linux/ctype.h>
30 #include <linux/stackdepot.h>
31 #include <linux/debugobjects.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kfence.h>
34 #include <linux/memory.h>
35 #include <linux/math64.h>
36 #include <linux/fault-inject.h>
37 #include <linux/kmemleak.h>
38 #include <linux/stacktrace.h>
39 #include <linux/prefetch.h>
40 #include <linux/memcontrol.h>
41 #include <linux/random.h>
42 #include <kunit/test.h>
43 #include <kunit/test-bug.h>
44 #include <linux/sort.h>
45 
46 #include <linux/debugfs.h>
47 #include <trace/events/kmem.h>
48 
49 #include "internal.h"
50 
51 /*
52  * Lock order:
53  *   1. slab_mutex (Global Mutex)
54  *   2. node->list_lock (Spinlock)
55  *   3. kmem_cache->cpu_slab->lock (Local lock)
56  *   4. slab_lock(slab) (Only on some arches)
57  *   5. object_map_lock (Only for debugging)
58  *
59  *   slab_mutex
60  *
61  *   The role of the slab_mutex is to protect the list of all the slabs
62  *   and to synchronize major metadata changes to slab cache structures.
63  *   Also synchronizes memory hotplug callbacks.
64  *
65  *   slab_lock
66  *
67  *   The slab_lock is a wrapper around the page lock, thus it is a bit
68  *   spinlock.
69  *
70  *   The slab_lock is only used on arches that do not have the ability
71  *   to do a cmpxchg_double. It only protects:
72  *
73  *	A. slab->freelist	-> List of free objects in a slab
74  *	B. slab->inuse		-> Number of objects in use
75  *	C. slab->objects	-> Number of objects in slab
76  *	D. slab->frozen		-> frozen state
77  *
78  *   Frozen slabs
79  *
80  *   If a slab is frozen then it is exempt from list management. It is
81  *   the cpu slab which is actively allocated from by the processor that
82  *   froze it and it is not on any list. The processor that froze the
83  *   slab is the one who can perform list operations on the slab. Other
84  *   processors may put objects onto the freelist but the processor that
85  *   froze the slab is the only one that can retrieve the objects from the
86  *   slab's freelist.
87  *
88  *   CPU partial slabs
89  *
90  *   The partially empty slabs cached on the CPU partial list are used
91  *   for performance reasons: they speed up the allocation process.
92  *   These slabs are not frozen, but are also exempt from list management,
93  *   by clearing the PG_workingset flag when moving out of the node
94  *   partial list. Please see __slab_free() for more details.
95  *
96  *   To sum up, the current scheme is:
97  *   - node partial slab: PG_Workingset && !frozen
98  *   - cpu partial slab: !PG_Workingset && !frozen
99  *   - cpu slab: !PG_Workingset && frozen
100  *   - full slab: !PG_Workingset && !frozen
101  *
102  *   list_lock
103  *
104  *   The list_lock protects the partial and full list on each node and
105  *   the partial slab counter. While it is held, no slabs may be added to or
106  *   removed from the lists, nor may the number of partial slabs be modified.
107  *   (Note that the total number of slabs is an atomic value that may be
108  *   modified without taking the list lock).
109  *
110  *   The list_lock is a centralized lock and thus we avoid taking it as
111  *   much as possible. As long as SLUB does not have to handle partial
112  *   slabs, operations can continue without any centralized lock. F.e.
113  *   allocating a long series of objects that fill up slabs does not require
114  *   the list lock.
115  *
116  *   For debug caches, all allocations are forced to go through a list_lock
117  *   protected region to serialize against concurrent validation.
118  *
119  *   cpu_slab->lock local lock
120  *
121  *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
122  *   except the stat counters. This is a percpu structure manipulated only by
123  *   the local cpu, so the lock protects against being preempted or interrupted
124  *   by an irq. Fast path operations rely on lockless operations instead.
125  *
126  *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption
127  *   which means the lockless fastpath cannot be used as it might interfere with
128  *   an in-progress slow path operation. In this case the local lock is always
129  *   taken but it still utilizes the freelist for the common operations.
130  *
131  *   lockless fastpaths
132  *
133  *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
134  *   are fully lockless when satisfied from the percpu slab (and when
135  *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
136  *   They also don't disable preemption or migration or irqs. They rely on
137  *   the transaction id (tid) field to detect being preempted or moved to
138  *   another cpu.
139  *
140  *   irq, preemption, migration considerations
141  *
142  *   Interrupts are disabled as part of list_lock or local_lock operations, or
143  *   around the slab_lock operation, in order to make the slab allocator safe
144  *   to use in the context of an irq.
145  *
146  *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
147  *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
148  *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
149  *   doesn't have to be revalidated in each section protected by the local lock.
150  *
151  * SLUB assigns one slab for allocation to each processor.
152  * Allocations only occur from these slabs called cpu slabs.
153  *
154  * Slabs with free elements are kept on a partial list and during regular
155  * operations no list for full slabs is used. If an object in a full slab is
156  * freed then the slab will show up again on the partial lists.
157  * We track full slabs for debugging purposes though because otherwise we
158  * cannot scan all objects.
159  *
160  * Slabs are freed when they become empty. Teardown and setup is
161  * minimal so we rely on the page allocator's per cpu caches for
162  * fast frees and allocs.
163  *
164  * slab->frozen		The slab is frozen and exempt from list processing.
165  * 			This means that the slab is dedicated to a purpose
166  * 			such as satisfying allocations for a specific
167  * 			processor. Objects may be freed in the slab while
168  * 			it is frozen but slab_free will then skip the usual
169  * 			list operations. It is up to the processor holding
170  * 			the slab to integrate the slab into the slab lists
171  * 			when the slab is no longer needed.
172  *
173  * 			One use of this flag is to mark slabs that are
174  * 			used for allocations. Then such a slab becomes a cpu
175  * 			slab. The cpu slab may be equipped with an additional
176  * 			freelist that allows lockless access to
177  * 			free objects in addition to the regular freelist
178  * 			that requires the slab lock.
179  *
180  * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
181  * 			options set. This moves	slab handling out of
182  * 			the fast path and disables lockless freelists.
183  */
184 
185 /*
186  * We could simply use migrate_disable()/enable(), but since that is a
187  * function call even on !PREEMPT_RT, use the inline preempt_disable() there.
188  */
189 #ifndef CONFIG_PREEMPT_RT
190 #define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
191 #define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
192 #define USE_LOCKLESS_FAST_PATH()	(true)
193 #else
194 #define slub_get_cpu_ptr(var)		\
195 ({					\
196 	migrate_disable();		\
197 	this_cpu_ptr(var);		\
198 })
199 #define slub_put_cpu_ptr(var)		\
200 do {					\
201 	(void)(var);			\
202 	migrate_enable();		\
203 } while (0)
204 #define USE_LOCKLESS_FAST_PATH()	(false)
205 #endif
206 
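/*
 * Force-inline the allocation/free fast paths unless we are optimizing
 * for size (CONFIG_SLUB_TINY).
 */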
207 #ifndef CONFIG_SLUB_TINY
208 #define __fastpath_inline __always_inline
209 #else
210 #define __fastpath_inline
211 #endif
212 
213 #ifdef CONFIG_SLUB_DEBUG
214 #ifdef CONFIG_SLUB_DEBUG_ON
215 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
216 #else
217 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
218 #endif
219 #endif		/* CONFIG_SLUB_DEBUG */
220 
221 /* Structure holding parameters for get_partial() call chain */
222 struct partial_context {
223 	gfp_t flags;
224 	unsigned int orig_size;
225 	void *object;
226 };
227 
228 static inline bool kmem_cache_debug(struct kmem_cache *s)
229 {
230 	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
231 }
232 
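/*
 * True if the original kmalloc() request size is stored in the object's
 * debug metadata (requires SLAB_STORE_USER on a kmalloc cache).
 */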
233 static inline bool slub_debug_orig_size(struct kmem_cache *s)
234 {
235 	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
236 			(s->flags & SLAB_KMALLOC));
237 }
238 
239 void *fixup_red_left(struct kmem_cache *s, void *p)
240 {
241 	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
242 		p += s->red_left_pad;
243 
244 	return p;
245 }
246 
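/*
 * Per-cpu partial lists are used only when CONFIG_SLUB_CPU_PARTIAL is
 * enabled and the cache is not a debug cache.
 */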
247 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
248 {
249 #ifdef CONFIG_SLUB_CPU_PARTIAL
250 	return !kmem_cache_debug(s);
251 #else
252 	return false;
253 #endif
254 }
255 
256 /*
257  * Issues still to be resolved:
258  *
259  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
260  *
261  * - Variable sizing of the per node arrays
262  */
263 
264 /* Enable to log cmpxchg failures */
265 #undef SLUB_DEBUG_CMPXCHG
266 
267 #ifndef CONFIG_SLUB_TINY
268 /*
269  * Minimum number of partial slabs. These will be left on the partial
270  * lists even if they are empty. kmem_cache_shrink may reclaim them.
271  */
272 #define MIN_PARTIAL 5
273 
274 /*
275  * Maximum number of desirable partial slabs.
276  * The existence of more partial slabs makes kmem_cache_shrink
277  * sort the partial list by the number of objects in use.
278  */
279 #define MAX_PARTIAL 10
280 #else
281 #define MIN_PARTIAL 0
282 #define MAX_PARTIAL 0
283 #endif
284 
285 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
286 				SLAB_POISON | SLAB_STORE_USER)
287 
288 /*
289  * These debug flags cannot use CMPXCHG because there might be consistency
290  * issues when checking or reading debug information
291  */
292 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
293 				SLAB_TRACE)
294 
295 
296 /*
297  * Debugging flags that require metadata to be stored in the slab.  These get
298  * disabled when slab_debug=O is used and a cache's min order increases with
299  * metadata.
300  */
301 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
302 
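/*
 * kmem_cache_order_objects packs the slab page order and the number of
 * objects per slab into a single word: the order is stored above OO_SHIFT,
 * the object count in the low OO_SHIFT bits (see oo_make(), oo_order() and
 * oo_objects() below).
 */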
303 #define OO_SHIFT	16
304 #define OO_MASK		((1 << OO_SHIFT) - 1)
305 #define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */
306 
307 /* Internal SLUB flags */
308 /* Poison object */
309 #define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
310 /* Use cmpxchg_double */
311 
312 #ifdef system_has_freelist_aba
313 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
314 #else
315 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
316 #endif
317 
318 /*
319  * Tracking user of a slab.
320  */
321 #define TRACK_ADDRS_COUNT 16
322 struct track {
323 	unsigned long addr;	/* Called from address */
324 #ifdef CONFIG_STACKDEPOT
325 	depot_stack_handle_t handle;
326 #endif
327 	int cpu;		/* Was running on cpu */
328 	int pid;		/* Pid context */
329 	unsigned long when;	/* When did the operation occur */
330 };
331 
332 enum track_item { TRACK_ALLOC, TRACK_FREE };
333 
334 #ifdef SLAB_SUPPORTS_SYSFS
335 static int sysfs_slab_add(struct kmem_cache *);
336 static int sysfs_slab_alias(struct kmem_cache *, const char *);
337 #else
338 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
339 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
340 							{ return 0; }
341 #endif
342 
343 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
344 static void debugfs_slab_add(struct kmem_cache *);
345 #else
346 static inline void debugfs_slab_add(struct kmem_cache *s) { }
347 #endif
348 
349 enum stat_item {
350 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
351 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
352 	FREE_FASTPATH,		/* Free to cpu slab */
353 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
354 	FREE_FROZEN,		/* Freeing to frozen slab */
355 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
356 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
357 	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
358 	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
359 	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
360 	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
361 	FREE_SLAB,		/* Slab freed to the page allocator */
362 	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
363 	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
364 	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
365 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
366 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
367 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
368 	DEACTIVATE_BYPASS,	/* Implicit deactivation */
369 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
370 	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
371 	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
372 	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
373 	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
374 	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
375 	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
376 	NR_SLUB_STAT_ITEMS
377 };
378 
379 #ifndef CONFIG_SLUB_TINY
380 /*
381  * When changing the layout, make sure freelist and tid are still compatible
382  * with this_cpu_cmpxchg_double() alignment requirements.
383  */
384 struct kmem_cache_cpu {
385 	union {
386 		struct {
387 			void **freelist;	/* Pointer to next available object */
388 			unsigned long tid;	/* Globally unique transaction id */
389 		};
390 		freelist_aba_t freelist_tid;
391 	};
392 	struct slab *slab;	/* The slab from which we are allocating */
393 #ifdef CONFIG_SLUB_CPU_PARTIAL
394 	struct slab *partial;	/* Partially allocated slabs */
395 #endif
396 	local_lock_t lock;	/* Protects the fields above */
397 #ifdef CONFIG_SLUB_STATS
398 	unsigned int stat[NR_SLUB_STAT_ITEMS];
399 #endif
400 };
401 #endif /* CONFIG_SLUB_TINY */
402 
403 static inline void stat(const struct kmem_cache *s, enum stat_item si)
404 {
405 #ifdef CONFIG_SLUB_STATS
406 	/*
407 	 * The rmw is racy on a preemptible kernel but this is acceptable, so
408 	 * avoid this_cpu_add()'s irq-disable overhead.
409 	 */
410 	raw_cpu_inc(s->cpu_slab->stat[si]);
411 #endif
412 }
413 
414 static inline
415 void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
416 {
417 #ifdef CONFIG_SLUB_STATS
418 	raw_cpu_add(s->cpu_slab->stat[si], v);
419 #endif
420 }
421 
422 /*
423  * The slab lists for all objects.
424  */
425 struct kmem_cache_node {
426 	spinlock_t list_lock;
427 	unsigned long nr_partial;
428 	struct list_head partial;
429 #ifdef CONFIG_SLUB_DEBUG
430 	atomic_long_t nr_slabs;
431 	atomic_long_t total_objects;
432 	struct list_head full;
433 #endif
434 };
435 
436 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
437 {
438 	return s->node[node];
439 }
440 
441 /*
442  * Iterator over all nodes. The body will be executed for each node that has
443  * a kmem_cache_node structure allocated (which is true for all online nodes)
444  */
445 #define for_each_kmem_cache_node(__s, __node, __n) \
446 	for (__node = 0; __node < nr_node_ids; __node++) \
447 		 if ((__n = get_node(__s, __node)))
448 
449 /*
450  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
451  * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
452  * differ during memory hotplug/hotremove operations.
453  * Protected by slab_mutex.
454  */
455 static nodemask_t slab_nodes;
456 
457 #ifndef CONFIG_SLUB_TINY
458 /*
459  * Workqueue used for flush_cpu_slab().
460  */
461 static struct workqueue_struct *flushwq;
462 #endif
463 
464 /********************************************************************
465  * 			Core slab cache functions
466  *******************************************************************/
467 
468 /*
469  * freeptr_t represents a SLUB freelist pointer, which might be encoded
470  * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
471  */
472 typedef struct { unsigned long v; } freeptr_t;
473 
474 /*
475  * Returns freelist pointer (ptr). With hardening, this is obfuscated
476  * with an XOR of the address where the pointer is held and a per-cache
477  * random number.
478  */
479 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
480 					    void *ptr, unsigned long ptr_addr)
481 {
482 	unsigned long encoded;
483 
484 #ifdef CONFIG_SLAB_FREELIST_HARDENED
485 	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
486 #else
487 	encoded = (unsigned long)ptr;
488 #endif
489 	return (freeptr_t){.v = encoded};
490 }
491 
492 static inline void *freelist_ptr_decode(const struct kmem_cache *s,
493 					freeptr_t ptr, unsigned long ptr_addr)
494 {
495 	void *decoded;
496 
497 #ifdef CONFIG_SLAB_FREELIST_HARDENED
498 	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
499 #else
500 	decoded = (void *)ptr.v;
501 #endif
502 	return decoded;
503 }
504 
505 static inline void *get_freepointer(struct kmem_cache *s, void *object)
506 {
507 	unsigned long ptr_addr;
508 	freeptr_t p;
509 
510 	object = kasan_reset_tag(object);
511 	ptr_addr = (unsigned long)object + s->offset;
512 	p = *(freeptr_t *)(ptr_addr);
513 	return freelist_ptr_decode(s, p, ptr_addr);
514 }
515 
516 #ifndef CONFIG_SLUB_TINY
517 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
518 {
519 	prefetchw(object + s->offset);
520 }
521 #endif
522 
523 /*
524  * When running under KMSAN, get_freepointer_safe() may return an uninitialized
525  * pointer value in the case the current thread loses the race for the next
526  * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
527  * slab_alloc_node() will fail, so the uninitialized value won't be used, but
528  * KMSAN will still check all arguments of cmpxchg because of imperfect
529  * handling of inline assembly.
530  * To work around this problem, we apply __no_kmsan_checks to ensure that
531  * get_freepointer_safe() returns initialized memory.
532  */
533 __no_kmsan_checks
534 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
535 {
536 	unsigned long freepointer_addr;
537 	freeptr_t p;
538 
539 	if (!debug_pagealloc_enabled_static())
540 		return get_freepointer(s, object);
541 
542 	object = kasan_reset_tag(object);
543 	freepointer_addr = (unsigned long)object + s->offset;
544 	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
545 	return freelist_ptr_decode(s, p, freepointer_addr);
546 }
547 
548 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
549 {
550 	unsigned long freeptr_addr = (unsigned long)object + s->offset;
551 
552 #ifdef CONFIG_SLAB_FREELIST_HARDENED
553 	BUG_ON(object == fp); /* naive detection of double free or corruption */
554 #endif
555 
556 	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
557 	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
558 }
559 
560 /*
561  * See comment in calculate_sizes().
562  */
563 static inline bool freeptr_outside_object(struct kmem_cache *s)
564 {
565 	return s->offset >= s->inuse;
566 }
567 
568 /*
569  * Return the offset of the end of the info block, which is inuse plus the
570  * free pointer if the free pointer does not overlap with the object.
571  */
572 static inline unsigned int get_info_end(struct kmem_cache *s)
573 {
574 	if (freeptr_outside_object(s))
575 		return s->inuse + sizeof(void *);
576 	else
577 		return s->inuse;
578 }
579 
580 /* Loop over all objects in a slab */
581 #define for_each_object(__p, __s, __addr, __objects) \
582 	for (__p = fixup_red_left(__s, __addr); \
583 		__p < (__addr) + (__objects) * (__s)->size; \
584 		__p += (__s)->size)
585 
586 static inline unsigned int order_objects(unsigned int order, unsigned int size)
587 {
588 	return ((unsigned int)PAGE_SIZE << order) / size;
589 }
590 
591 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
592 		unsigned int size)
593 {
594 	struct kmem_cache_order_objects x = {
595 		(order << OO_SHIFT) + order_objects(order, size)
596 	};
597 
598 	return x;
599 }
600 
601 static inline unsigned int oo_order(struct kmem_cache_order_objects x)
602 {
603 	return x.x >> OO_SHIFT;
604 }
605 
606 static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
607 {
608 	return x.x & OO_MASK;
609 }
610 
611 #ifdef CONFIG_SLUB_CPU_PARTIAL
612 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
613 {
614 	unsigned int nr_slabs;
615 
616 	s->cpu_partial = nr_objects;
617 
618 	/*
619 	 * We take the number of objects but actually limit the number of
620 	 * slabs on the per cpu partial list, in order to limit excessive
621 	 * growth of the list. For simplicity we assume that the slabs will
622 	 * be half-full.
623 	 */
624 	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
625 	s->cpu_partial_slabs = nr_slabs;
626 }
627 #else
628 static inline void
629 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
630 {
631 }
632 #endif /* CONFIG_SLUB_CPU_PARTIAL */
633 
634 /*
635  * Per slab locking using the pagelock
636  */
637 static __always_inline void slab_lock(struct slab *slab)
638 {
639 	struct page *page = slab_page(slab);
640 
641 	VM_BUG_ON_PAGE(PageTail(page), page);
642 	bit_spin_lock(PG_locked, &page->flags);
643 }
644 
645 static __always_inline void slab_unlock(struct slab *slab)
646 {
647 	struct page *page = slab_page(slab);
648 
649 	VM_BUG_ON_PAGE(PageTail(page), page);
650 	bit_spin_unlock(PG_locked, &page->flags);
651 }
652 
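/*
 * Update slab->freelist and slab->counters as a single atomic double-word
 * cmpxchg when the architecture supports it (system_has_freelist_aba);
 * callers otherwise fall back to the slab_lock() protected
 * __update_freelist_slow() below.
 */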
653 static inline bool
654 __update_freelist_fast(struct slab *slab,
655 		      void *freelist_old, unsigned long counters_old,
656 		      void *freelist_new, unsigned long counters_new)
657 {
658 #ifdef system_has_freelist_aba
659 	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
660 	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
661 
662 	return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
663 #else
664 	return false;
665 #endif
666 }
667 
668 static inline bool
669 __update_freelist_slow(struct slab *slab,
670 		      void *freelist_old, unsigned long counters_old,
671 		      void *freelist_new, unsigned long counters_new)
672 {
673 	bool ret = false;
674 
675 	slab_lock(slab);
676 	if (slab->freelist == freelist_old &&
677 	    slab->counters == counters_old) {
678 		slab->freelist = freelist_new;
679 		slab->counters = counters_new;
680 		ret = true;
681 	}
682 	slab_unlock(slab);
683 
684 	return ret;
685 }
686 
687 /*
688  * Interrupts must be disabled (for the fallback code to work right), typically
689  * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
690  * part of bit_spin_lock(), is sufficient because the policy is not to allow any
691  * allocation/free operation in hardirq context. Therefore nothing can
692  * interrupt the operation.
693  */
694 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
695 		void *freelist_old, unsigned long counters_old,
696 		void *freelist_new, unsigned long counters_new,
697 		const char *n)
698 {
699 	bool ret;
700 
701 	if (USE_LOCKLESS_FAST_PATH())
702 		lockdep_assert_irqs_disabled();
703 
704 	if (s->flags & __CMPXCHG_DOUBLE) {
705 		ret = __update_freelist_fast(slab, freelist_old, counters_old,
706 				            freelist_new, counters_new);
707 	} else {
708 		ret = __update_freelist_slow(slab, freelist_old, counters_old,
709 				            freelist_new, counters_new);
710 	}
711 	if (likely(ret))
712 		return true;
713 
714 	cpu_relax();
715 	stat(s, CMPXCHG_DOUBLE_FAIL);
716 
717 #ifdef SLUB_DEBUG_CMPXCHG
718 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
719 #endif
720 
721 	return false;
722 }
723 
724 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
725 		void *freelist_old, unsigned long counters_old,
726 		void *freelist_new, unsigned long counters_new,
727 		const char *n)
728 {
729 	bool ret;
730 
731 	if (s->flags & __CMPXCHG_DOUBLE) {
732 		ret = __update_freelist_fast(slab, freelist_old, counters_old,
733 				            freelist_new, counters_new);
734 	} else {
735 		unsigned long flags;
736 
737 		local_irq_save(flags);
738 		ret = __update_freelist_slow(slab, freelist_old, counters_old,
739 				            freelist_new, counters_new);
740 		local_irq_restore(flags);
741 	}
742 	if (likely(ret))
743 		return true;
744 
745 	cpu_relax();
746 	stat(s, CMPXCHG_DOUBLE_FAIL);
747 
748 #ifdef SLUB_DEBUG_CMPXCHG
749 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
750 #endif
751 
752 	return false;
753 }
754 
755 #ifdef CONFIG_SLUB_DEBUG
756 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
757 static DEFINE_SPINLOCK(object_map_lock);
758 
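/*
 * Build a bitmap of free objects: set a bit in obj_map for every object
 * currently on the slab's freelist.
 */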
759 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
760 		       struct slab *slab)
761 {
762 	void *addr = slab_address(slab);
763 	void *p;
764 
765 	bitmap_zero(obj_map, slab->objects);
766 
767 	for (p = slab->freelist; p; p = get_freepointer(s, p))
768 		set_bit(__obj_to_index(s, addr, p), obj_map);
769 }
770 
771 #if IS_ENABLED(CONFIG_KUNIT)
772 static bool slab_add_kunit_errors(void)
773 {
774 	struct kunit_resource *resource;
775 
776 	if (!kunit_get_current_test())
777 		return false;
778 
779 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
780 	if (!resource)
781 		return false;
782 
783 	(*(int *)resource->data)++;
784 	kunit_put_resource(resource);
785 	return true;
786 }
787 #else
788 static inline bool slab_add_kunit_errors(void) { return false; }
789 #endif
790 
791 static inline unsigned int size_from_object(struct kmem_cache *s)
792 {
793 	if (s->flags & SLAB_RED_ZONE)
794 		return s->size - s->red_left_pad;
795 
796 	return s->size;
797 }
798 
799 static inline void *restore_red_left(struct kmem_cache *s, void *p)
800 {
801 	if (s->flags & SLAB_RED_ZONE)
802 		p -= s->red_left_pad;
803 
804 	return p;
805 }
806 
807 /*
808  * Debug settings:
809  */
810 #if defined(CONFIG_SLUB_DEBUG_ON)
811 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
812 #else
813 static slab_flags_t slub_debug;
814 #endif
815 
816 static char *slub_debug_string;
817 static int disable_higher_order_debug;
818 
819 /*
820  * slub is about to manipulate internal object metadata.  This memory lies
821  * outside the range of the allocated object, so accessing it would normally
822  * be reported by kasan as a bounds error.  metadata_access_enable() is used
823  * to tell kasan that these accesses are OK.
824  */
825 static inline void metadata_access_enable(void)
826 {
827 	kasan_disable_current();
828 }
829 
830 static inline void metadata_access_disable(void)
831 {
832 	kasan_enable_current();
833 }
834 
835 /*
836  * Object debugging
837  */
838 
839 /* Verify that a pointer has an address that is valid within a slab page */
840 static inline int check_valid_pointer(struct kmem_cache *s,
841 				struct slab *slab, void *object)
842 {
843 	void *base;
844 
845 	if (!object)
846 		return 1;
847 
848 	base = slab_address(slab);
849 	object = kasan_reset_tag(object);
850 	object = restore_red_left(s, object);
851 	if (object < base || object >= base + slab->objects * s->size ||
852 		(object - base) % s->size) {
853 		return 0;
854 	}
855 
856 	return 1;
857 }
858 
859 static void print_section(char *level, char *text, u8 *addr,
860 			  unsigned int length)
861 {
862 	metadata_access_enable();
863 	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
864 			16, 1, kasan_reset_tag((void *)addr), length, 1);
865 	metadata_access_disable();
866 }
867 
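/*
 * The TRACK_ALLOC and TRACK_FREE records are stored back to back at
 * offset get_info_end() from the object start.
 */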
868 static struct track *get_track(struct kmem_cache *s, void *object,
869 	enum track_item alloc)
870 {
871 	struct track *p;
872 
873 	p = object + get_info_end(s);
874 
875 	return kasan_reset_tag(p + alloc);
876 }
877 
878 #ifdef CONFIG_STACKDEPOT
879 static noinline depot_stack_handle_t set_track_prepare(void)
880 {
881 	depot_stack_handle_t handle;
882 	unsigned long entries[TRACK_ADDRS_COUNT];
883 	unsigned int nr_entries;
884 
885 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
886 	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
887 
888 	return handle;
889 }
890 #else
891 static inline depot_stack_handle_t set_track_prepare(void)
892 {
893 	return 0;
894 }
895 #endif
896 
897 static void set_track_update(struct kmem_cache *s, void *object,
898 			     enum track_item alloc, unsigned long addr,
899 			     depot_stack_handle_t handle)
900 {
901 	struct track *p = get_track(s, object, alloc);
902 
903 #ifdef CONFIG_STACKDEPOT
904 	p->handle = handle;
905 #endif
906 	p->addr = addr;
907 	p->cpu = smp_processor_id();
908 	p->pid = current->pid;
909 	p->when = jiffies;
910 }
911 
912 static __always_inline void set_track(struct kmem_cache *s, void *object,
913 				      enum track_item alloc, unsigned long addr)
914 {
915 	depot_stack_handle_t handle = set_track_prepare();
916 
917 	set_track_update(s, object, alloc, addr, handle);
918 }
919 
920 static void init_tracking(struct kmem_cache *s, void *object)
921 {
922 	struct track *p;
923 
924 	if (!(s->flags & SLAB_STORE_USER))
925 		return;
926 
927 	p = get_track(s, object, TRACK_ALLOC);
928 	memset(p, 0, 2*sizeof(struct track));
929 }
930 
931 static void print_track(const char *s, struct track *t, unsigned long pr_time)
932 {
933 	depot_stack_handle_t handle __maybe_unused;
934 
935 	if (!t->addr)
936 		return;
937 
938 	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
939 	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
940 #ifdef CONFIG_STACKDEPOT
941 	handle = READ_ONCE(t->handle);
942 	if (handle)
943 		stack_depot_print(handle);
944 	else
945 		pr_err("object allocation/free stack trace missing\n");
946 #endif
947 }
948 
949 void print_tracking(struct kmem_cache *s, void *object)
950 {
951 	unsigned long pr_time = jiffies;
952 	if (!(s->flags & SLAB_STORE_USER))
953 		return;
954 
955 	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
956 	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
957 }
958 
959 static void print_slab_info(const struct slab *slab)
960 {
961 	struct folio *folio = (struct folio *)slab_folio(slab);
962 
963 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
964 	       slab, slab->objects, slab->inuse, slab->freelist,
965 	       folio_flags(folio, 0));
966 }
967 
968 /*
969  * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API
970  * family rounds up the real request size to these fixed ones, so there can be
971  * extra space beyond what was requested. Save the original request size in the
972  * metadata area, for better debugging and sanity checks.
973  */
974 static inline void set_orig_size(struct kmem_cache *s,
975 				void *object, unsigned int orig_size)
976 {
977 	void *p = kasan_reset_tag(object);
978 	unsigned int kasan_meta_size;
979 
980 	if (!slub_debug_orig_size(s))
981 		return;
982 
983 	/*
984 	 * KASAN can save its free meta data inside of the object at offset 0.
985 	 * If this meta data size is larger than 'orig_size', it will overlap
986 	 * the data redzone in [orig_size+1, object_size]. Thus, we adjust
987 	 * 'orig_size' to be at least as big as KASAN's meta data.
988 	 */
989 	kasan_meta_size = kasan_metadata_size(s, true);
990 	if (kasan_meta_size > orig_size)
991 		orig_size = kasan_meta_size;
992 
993 	p += get_info_end(s);
994 	p += sizeof(struct track) * 2;
995 
996 	*(unsigned int *)p = orig_size;
997 }
998 
999 static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
1000 {
1001 	void *p = kasan_reset_tag(object);
1002 
1003 	if (!slub_debug_orig_size(s))
1004 		return s->object_size;
1005 
1006 	p += get_info_end(s);
1007 	p += sizeof(struct track) * 2;
1008 
1009 	return *(unsigned int *)p;
1010 }
1011 
1012 void skip_orig_size_check(struct kmem_cache *s, const void *object)
1013 {
1014 	set_orig_size(s, (void *)object, s->object_size);
1015 }
1016 
1017 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
1018 {
1019 	struct va_format vaf;
1020 	va_list args;
1021 
1022 	va_start(args, fmt);
1023 	vaf.fmt = fmt;
1024 	vaf.va = &args;
1025 	pr_err("=============================================================================\n");
1026 	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
1027 	pr_err("-----------------------------------------------------------------------------\n\n");
1028 	va_end(args);
1029 }
1030 
1031 __printf(2, 3)
1032 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
1033 {
1034 	struct va_format vaf;
1035 	va_list args;
1036 
1037 	if (slab_add_kunit_errors())
1038 		return;
1039 
1040 	va_start(args, fmt);
1041 	vaf.fmt = fmt;
1042 	vaf.va = &args;
1043 	pr_err("FIX %s: %pV\n", s->name, &vaf);
1044 	va_end(args);
1045 }
1046 
1047 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
1048 {
1049 	unsigned int off;	/* Offset of last byte */
1050 	u8 *addr = slab_address(slab);
1051 
1052 	print_tracking(s, p);
1053 
1054 	print_slab_info(slab);
1055 
1056 	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
1057 	       p, p - addr, get_freepointer(s, p));
1058 
1059 	if (s->flags & SLAB_RED_ZONE)
1060 		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
1061 			      s->red_left_pad);
1062 	else if (p > addr + 16)
1063 		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
1064 
1065 	print_section(KERN_ERR,         "Object   ", p,
1066 		      min_t(unsigned int, s->object_size, PAGE_SIZE));
1067 	if (s->flags & SLAB_RED_ZONE)
1068 		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
1069 			s->inuse - s->object_size);
1070 
1071 	off = get_info_end(s);
1072 
1073 	if (s->flags & SLAB_STORE_USER)
1074 		off += 2 * sizeof(struct track);
1075 
1076 	if (slub_debug_orig_size(s))
1077 		off += sizeof(unsigned int);
1078 
1079 	off += kasan_metadata_size(s, false);
1080 
1081 	if (off != size_from_object(s))
1082 		/* Beginning of the filler is the free pointer */
1083 		print_section(KERN_ERR, "Padding  ", p + off,
1084 			      size_from_object(s) - off);
1085 
1086 	dump_stack();
1087 }
1088 
1089 static void object_err(struct kmem_cache *s, struct slab *slab,
1090 			u8 *object, char *reason)
1091 {
1092 	if (slab_add_kunit_errors())
1093 		return;
1094 
1095 	slab_bug(s, "%s", reason);
1096 	print_trailer(s, slab, object);
1097 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1098 }
1099 
1100 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1101 			       void **freelist, void *nextfree)
1102 {
1103 	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
1104 	    !check_valid_pointer(s, slab, nextfree) && freelist) {
1105 		object_err(s, slab, *freelist, "Freechain corrupt");
1106 		*freelist = NULL;
1107 		slab_fix(s, "Isolate corrupted freechain");
1108 		return true;
1109 	}
1110 
1111 	return false;
1112 }
1113 
1114 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
1115 			const char *fmt, ...)
1116 {
1117 	va_list args;
1118 	char buf[100];
1119 
1120 	if (slab_add_kunit_errors())
1121 		return;
1122 
1123 	va_start(args, fmt);
1124 	vsnprintf(buf, sizeof(buf), fmt, args);
1125 	va_end(args);
1126 	slab_bug(s, "%s", buf);
1127 	print_slab_info(slab);
1128 	dump_stack();
1129 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1130 }
1131 
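/*
 * Initialize a single object's redzones and poison bytes according to the
 * cache's debug flags (val is SLUB_RED_INACTIVE or SLUB_RED_ACTIVE).
 */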
1132 static void init_object(struct kmem_cache *s, void *object, u8 val)
1133 {
1134 	u8 *p = kasan_reset_tag(object);
1135 	unsigned int poison_size = s->object_size;
1136 
1137 	if (s->flags & SLAB_RED_ZONE) {
1138 		memset(p - s->red_left_pad, val, s->red_left_pad);
1139 
1140 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1141 			/*
1142 			 * Redzone the extra space that kmalloc allocated beyond
1143 			 * the requested size, and limit the poison size to the
1144 			 * original request size accordingly.
1145 			 */
1146 			poison_size = get_orig_size(s, object);
1147 		}
1148 	}
1149 
1150 	if (s->flags & __OBJECT_POISON) {
1151 		memset(p, POISON_FREE, poison_size - 1);
1152 		p[poison_size - 1] = POISON_END;
1153 	}
1154 
1155 	if (s->flags & SLAB_RED_ZONE)
1156 		memset(p + poison_size, val, s->inuse - poison_size);
1157 }
1158 
1159 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
1160 						void *from, void *to)
1161 {
1162 	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1163 	memset(from, data, to - from);
1164 }
1165 
1166 static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
1167 			u8 *object, char *what,
1168 			u8 *start, unsigned int value, unsigned int bytes)
1169 {
1170 	u8 *fault;
1171 	u8 *end;
1172 	u8 *addr = slab_address(slab);
1173 
1174 	metadata_access_enable();
1175 	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
1176 	metadata_access_disable();
1177 	if (!fault)
1178 		return 1;
1179 
1180 	end = start + bytes;
1181 	while (end > fault && end[-1] == value)
1182 		end--;
1183 
1184 	if (slab_add_kunit_errors())
1185 		goto skip_bug_print;
1186 
1187 	slab_bug(s, "%s overwritten", what);
1188 	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1189 					fault, end - 1, fault - addr,
1190 					fault[0], value);
1191 	print_trailer(s, slab, object);
1192 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1193 
1194 skip_bug_print:
1195 	restore_bytes(s, what, value, fault, end);
1196 	return 0;
1197 }
1198 
1199 /*
1200  * Object layout:
1201  *
1202  * object address
1203  * 	Bytes of the object to be managed.
1204  * 	If the freepointer may overlay the object then the free
1205  *	pointer is in the middle of the object.
1206  *
1207  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
1208  * 	0xa5 (POISON_END)
1209  *
1210  * object + s->object_size
1211  * 	Padding to reach word boundary. This is also used for Redzoning.
1212  * 	Padding is extended by another word if Redzoning is enabled and
1213  * 	object_size == inuse.
1214  *
1215  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
1216  * 	0xcc (RED_ACTIVE) for objects in use.
1217  *
1218  * object + s->inuse
1219  * 	Meta data starts here.
1220  *
1221  * 	A. Free pointer (if we cannot overwrite object on free)
1222  * 	B. Tracking data for SLAB_STORE_USER
1223  *	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
1224  *	D. Padding to reach required alignment boundary or at minimum
1225  * 		one word if debugging is on to be able to detect writes
1226  * 		before the word boundary.
1227  *
1228  *	Padding is done using 0x5a (POISON_INUSE)
1229  *
1230  * object + s->size
1231  * 	Nothing is used beyond s->size.
1232  *
1233  * If slabcaches are merged then the object_size and inuse boundaries are mostly
1234  * ignored. And therefore no slab options that rely on these boundaries
1235  * may be used with merged slabcaches.
1236  */
1237 
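/*
 * Check that the padding between the end of the object's debug metadata
 * and the end of its slot still contains POISON_INUSE.
 */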
1238 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
1239 {
1240 	unsigned long off = get_info_end(s);	/* The end of info */
1241 
1242 	if (s->flags & SLAB_STORE_USER) {
1243 		/* We also have user information there */
1244 		off += 2 * sizeof(struct track);
1245 
1246 		if (s->flags & SLAB_KMALLOC)
1247 			off += sizeof(unsigned int);
1248 	}
1249 
1250 	off += kasan_metadata_size(s, false);
1251 
1252 	if (size_from_object(s) == off)
1253 		return 1;
1254 
1255 	return check_bytes_and_report(s, slab, p, "Object padding",
1256 			p + off, POISON_INUSE, size_from_object(s) - off);
1257 }
1258 
1259 /* Check the pad bytes at the end of a slab page */
1260 static void slab_pad_check(struct kmem_cache *s, struct slab *slab)
1261 {
1262 	u8 *start;
1263 	u8 *fault;
1264 	u8 *end;
1265 	u8 *pad;
1266 	int length;
1267 	int remainder;
1268 
1269 	if (!(s->flags & SLAB_POISON))
1270 		return;
1271 
1272 	start = slab_address(slab);
1273 	length = slab_size(slab);
1274 	end = start + length;
1275 	remainder = length % s->size;
1276 	if (!remainder)
1277 		return;
1278 
1279 	pad = end - remainder;
1280 	metadata_access_enable();
1281 	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1282 	metadata_access_disable();
1283 	if (!fault)
1284 		return;
1285 	while (end > fault && end[-1] == POISON_INUSE)
1286 		end--;
1287 
1288 	slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1289 			fault, end - 1, fault - start);
1290 	print_section(KERN_ERR, "Padding ", pad, remainder);
1291 
1292 	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
1293 }
1294 
1295 static int check_object(struct kmem_cache *s, struct slab *slab,
1296 					void *object, u8 val)
1297 {
1298 	u8 *p = object;
1299 	u8 *endobject = object + s->object_size;
1300 	unsigned int orig_size, kasan_meta_size;
1301 
1302 	if (s->flags & SLAB_RED_ZONE) {
1303 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1304 			object - s->red_left_pad, val, s->red_left_pad))
1305 			return 0;
1306 
1307 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1308 			endobject, val, s->inuse - s->object_size))
1309 			return 0;
1310 
1311 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1312 			orig_size = get_orig_size(s, object);
1313 
1314 			if (s->object_size > orig_size  &&
1315 				!check_bytes_and_report(s, slab, object,
1316 					"kmalloc Redzone", p + orig_size,
1317 					val, s->object_size - orig_size)) {
1318 				return 0;
1319 			}
1320 		}
1321 	} else {
1322 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1323 			check_bytes_and_report(s, slab, p, "Alignment padding",
1324 				endobject, POISON_INUSE,
1325 				s->inuse - s->object_size);
1326 		}
1327 	}
1328 
1329 	if (s->flags & SLAB_POISON) {
1330 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1331 			/*
1332 			 * KASAN can save its free meta data inside of the
1333 			 * object at offset 0. Thus, skip checking the part of
1334 			 * the redzone that overlaps with the meta data.
1335 			 */
1336 			kasan_meta_size = kasan_metadata_size(s, true);
1337 			if (kasan_meta_size < s->object_size - 1 &&
1338 			    !check_bytes_and_report(s, slab, p, "Poison",
1339 					p + kasan_meta_size, POISON_FREE,
1340 					s->object_size - kasan_meta_size - 1))
1341 				return 0;
1342 			if (kasan_meta_size < s->object_size &&
1343 			    !check_bytes_and_report(s, slab, p, "End Poison",
1344 					p + s->object_size - 1, POISON_END, 1))
1345 				return 0;
1346 		}
1347 		/*
1348 		 * check_pad_bytes cleans up on its own.
1349 		 */
1350 		check_pad_bytes(s, slab, p);
1351 	}
1352 
1353 	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
1354 		/*
1355 		 * Object and freepointer overlap. Cannot check
1356 		 * freepointer while object is allocated.
1357 		 */
1358 		return 1;
1359 
1360 	/* Check free pointer validity */
1361 	if (!check_valid_pointer(s, slab, get_freepointer(s, p))) {
1362 		object_err(s, slab, p, "Freepointer corrupt");
1363 		/*
1364 		 * No choice but to zap it and thus lose the remainder
1365 		 * of the free objects in this slab. May cause
1366 		 * another error because the object count is now wrong.
1367 		 */
1368 		set_freepointer(s, p, NULL);
1369 		return 0;
1370 	}
1371 	return 1;
1372 }
1373 
1374 static int check_slab(struct kmem_cache *s, struct slab *slab)
1375 {
1376 	int maxobj;
1377 
1378 	if (!folio_test_slab(slab_folio(slab))) {
1379 		slab_err(s, slab, "Not a valid slab page");
1380 		return 0;
1381 	}
1382 
1383 	maxobj = order_objects(slab_order(slab), s->size);
1384 	if (slab->objects > maxobj) {
1385 		slab_err(s, slab, "objects %u > max %u",
1386 			slab->objects, maxobj);
1387 		return 0;
1388 	}
1389 	if (slab->inuse > slab->objects) {
1390 		slab_err(s, slab, "inuse %u > max %u",
1391 			slab->inuse, slab->objects);
1392 		return 0;
1393 	}
1394 	/* slab_pad_check fixes things up after itself */
1395 	slab_pad_check(s, slab);
1396 	return 1;
1397 }
1398 
1399 /*
1400  * Determine if a certain object in a slab is on the freelist. Must hold the
1401  * slab lock to guarantee that the chains are in a consistent state.
1402  */
1403 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
1404 {
1405 	int nr = 0;
1406 	void *fp;
1407 	void *object = NULL;
1408 	int max_objects;
1409 
1410 	fp = slab->freelist;
1411 	while (fp && nr <= slab->objects) {
1412 		if (fp == search)
1413 			return 1;
1414 		if (!check_valid_pointer(s, slab, fp)) {
1415 			if (object) {
1416 				object_err(s, slab, object,
1417 					"Freechain corrupt");
1418 				set_freepointer(s, object, NULL);
1419 			} else {
1420 				slab_err(s, slab, "Freepointer corrupt");
1421 				slab->freelist = NULL;
1422 				slab->inuse = slab->objects;
1423 				slab_fix(s, "Freelist cleared");
1424 				return 0;
1425 			}
1426 			break;
1427 		}
1428 		object = fp;
1429 		fp = get_freepointer(s, object);
1430 		nr++;
1431 	}
1432 
1433 	max_objects = order_objects(slab_order(slab), s->size);
1434 	if (max_objects > MAX_OBJS_PER_PAGE)
1435 		max_objects = MAX_OBJS_PER_PAGE;
1436 
1437 	if (slab->objects != max_objects) {
1438 		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1439 			 slab->objects, max_objects);
1440 		slab->objects = max_objects;
1441 		slab_fix(s, "Number of objects adjusted");
1442 	}
1443 	if (slab->inuse != slab->objects - nr) {
1444 		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
1445 			 slab->inuse, slab->objects - nr);
1446 		slab->inuse = slab->objects - nr;
1447 		slab_fix(s, "Object count adjusted");
1448 	}
1449 	return search == NULL;
1450 }
1451 
1452 static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1453 								int alloc)
1454 {
1455 	if (s->flags & SLAB_TRACE) {
1456 		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1457 			s->name,
1458 			alloc ? "alloc" : "free",
1459 			object, slab->inuse,
1460 			slab->freelist);
1461 
1462 		if (!alloc)
1463 			print_section(KERN_INFO, "Object ", (void *)object,
1464 					s->object_size);
1465 
1466 		dump_stack();
1467 	}
1468 }
1469 
1470 /*
1471  * Tracking of fully allocated slabs for debugging purposes.
1472  */
1473 static void add_full(struct kmem_cache *s,
1474 	struct kmem_cache_node *n, struct slab *slab)
1475 {
1476 	if (!(s->flags & SLAB_STORE_USER))
1477 		return;
1478 
1479 	lockdep_assert_held(&n->list_lock);
1480 	list_add(&slab->slab_list, &n->full);
1481 }
1482 
1483 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
1484 {
1485 	if (!(s->flags & SLAB_STORE_USER))
1486 		return;
1487 
1488 	lockdep_assert_held(&n->list_lock);
1489 	list_del(&slab->slab_list);
1490 }
1491 
1492 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1493 {
1494 	return atomic_long_read(&n->nr_slabs);
1495 }
1496 
1497 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1498 {
1499 	struct kmem_cache_node *n = get_node(s, node);
1500 
1501 	atomic_long_inc(&n->nr_slabs);
1502 	atomic_long_add(objects, &n->total_objects);
1503 }
1504 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1505 {
1506 	struct kmem_cache_node *n = get_node(s, node);
1507 
1508 	atomic_long_dec(&n->nr_slabs);
1509 	atomic_long_sub(objects, &n->total_objects);
1510 }
1511 
1512 /* Object debug checks for alloc/free paths */
1513 static void setup_object_debug(struct kmem_cache *s, void *object)
1514 {
1515 	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1516 		return;
1517 
1518 	init_object(s, object, SLUB_RED_INACTIVE);
1519 	init_tracking(s, object);
1520 }
1521 
1522 static
1523 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
1524 {
1525 	if (!kmem_cache_debug_flags(s, SLAB_POISON))
1526 		return;
1527 
1528 	metadata_access_enable();
1529 	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
1530 	metadata_access_disable();
1531 }
1532 
1533 static inline int alloc_consistency_checks(struct kmem_cache *s,
1534 					struct slab *slab, void *object)
1535 {
1536 	if (!check_slab(s, slab))
1537 		return 0;
1538 
1539 	if (!check_valid_pointer(s, slab, object)) {
1540 		object_err(s, slab, object, "Freelist Pointer check fails");
1541 		return 0;
1542 	}
1543 
1544 	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1545 		return 0;
1546 
1547 	return 1;
1548 }
1549 
1550 static noinline bool alloc_debug_processing(struct kmem_cache *s,
1551 			struct slab *slab, void *object, int orig_size)
1552 {
1553 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1554 		if (!alloc_consistency_checks(s, slab, object))
1555 			goto bad;
1556 	}
1557 
1558 	/* Success. Perform special debug activities for allocs */
1559 	trace(s, slab, object, 1);
1560 	set_orig_size(s, object, orig_size);
1561 	init_object(s, object, SLUB_RED_ACTIVE);
1562 	return true;
1563 
1564 bad:
1565 	if (folio_test_slab(slab_folio(slab))) {
1566 		/*
1567 		 * If this is a slab page then lets do the best we can
1568 		 * to avoid issues in the future. Marking all objects
1569 		 * as used avoids touching the remaining objects.
1570 		 */
1571 		slab_fix(s, "Marking all objects used");
1572 		slab->inuse = slab->objects;
1573 		slab->freelist = NULL;
1574 	}
1575 	return false;
1576 }
1577 
1578 static inline int free_consistency_checks(struct kmem_cache *s,
1579 		struct slab *slab, void *object, unsigned long addr)
1580 {
1581 	if (!check_valid_pointer(s, slab, object)) {
1582 		slab_err(s, slab, "Invalid object pointer 0x%p", object);
1583 		return 0;
1584 	}
1585 
1586 	if (on_freelist(s, slab, object)) {
1587 		object_err(s, slab, object, "Object already free");
1588 		return 0;
1589 	}
1590 
1591 	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1592 		return 0;
1593 
1594 	if (unlikely(s != slab->slab_cache)) {
1595 		if (!folio_test_slab(slab_folio(slab))) {
1596 			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
1597 				 object);
1598 		} else if (!slab->slab_cache) {
1599 			pr_err("SLUB <none>: no slab for object 0x%p.\n",
1600 			       object);
1601 			dump_stack();
1602 		} else
1603 			object_err(s, slab, object,
1604 					"page slab pointer corrupt.");
1605 		return 0;
1606 	}
1607 	return 1;
1608 }
1609 
1610 /*
1611  * Parse a block of slab_debug options. Blocks are delimited by ';'
1612  *
1613  * @str:    start of block
1614  * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1615  * @slabs:  return start of list of slabs, or NULL when there's no list
1616  * @init:   assume this is initial parsing and not per-kmem-create parsing
1617  *
1618  * returns the start of next block if there's any, or NULL
1619  */
1620 static char *
1621 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
1622 {
1623 	bool higher_order_disable = false;
1624 
1625 	/* Skip any completely empty blocks */
1626 	while (*str && *str == ';')
1627 		str++;
1628 
1629 	if (*str == ',') {
1630 		/*
1631 		 * No options but restriction on slabs. This means full
1632 		 * debugging for slabs matching a pattern.
1633 		 */
1634 		*flags = DEBUG_DEFAULT_FLAGS;
1635 		goto check_slabs;
1636 	}
1637 	*flags = 0;
1638 
1639 	/* Determine which debug features should be switched on */
1640 	for (; *str && *str != ',' && *str != ';'; str++) {
1641 		switch (tolower(*str)) {
1642 		case '-':
1643 			*flags = 0;
1644 			break;
1645 		case 'f':
1646 			*flags |= SLAB_CONSISTENCY_CHECKS;
1647 			break;
1648 		case 'z':
1649 			*flags |= SLAB_RED_ZONE;
1650 			break;
1651 		case 'p':
1652 			*flags |= SLAB_POISON;
1653 			break;
1654 		case 'u':
1655 			*flags |= SLAB_STORE_USER;
1656 			break;
1657 		case 't':
1658 			*flags |= SLAB_TRACE;
1659 			break;
1660 		case 'a':
1661 			*flags |= SLAB_FAILSLAB;
1662 			break;
1663 		case 'o':
1664 			/*
1665 			 * Avoid enabling debugging on caches whose minimum
1666 			 * order would increase as a result.
1667 			 */
1668 			higher_order_disable = true;
1669 			break;
1670 		default:
1671 			if (init)
1672 				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
1673 		}
1674 	}
1675 check_slabs:
1676 	if (*str == ',')
1677 		*slabs = ++str;
1678 	else
1679 		*slabs = NULL;
1680 
1681 	/* Skip over the slab list */
1682 	while (*str && *str != ';')
1683 		str++;
1684 
1685 	/* Skip any completely empty blocks */
1686 	while (*str && *str == ';')
1687 		str++;
1688 
1689 	if (init && higher_order_disable)
1690 		disable_higher_order_debug = 1;
1691 
1692 	if (*str)
1693 		return str;
1694 	else
1695 		return NULL;
1696 }
1697 
1698 static int __init setup_slub_debug(char *str)
1699 {
1700 	slab_flags_t flags;
1701 	slab_flags_t global_flags;
1702 	char *saved_str;
1703 	char *slab_list;
1704 	bool global_slub_debug_changed = false;
1705 	bool slab_list_specified = false;
1706 
1707 	global_flags = DEBUG_DEFAULT_FLAGS;
1708 	if (*str++ != '=' || !*str)
1709 		/*
1710 		 * No options specified. Switch on full debugging.
1711 		 */
1712 		goto out;
1713 
1714 	saved_str = str;
1715 	while (str) {
1716 		str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1717 
1718 		if (!slab_list) {
1719 			global_flags = flags;
1720 			global_slub_debug_changed = true;
1721 		} else {
1722 			slab_list_specified = true;
1723 			if (flags & SLAB_STORE_USER)
1724 				stack_depot_request_early_init();
1725 		}
1726 	}
1727 
1728 	/*
1729 	 * For backwards compatibility, a single list of flags with list of
1730 	 * slabs means debugging is only changed for those slabs, so the global
1731 	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1732 	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
1733 	 * long as there is no option specifying flags without a slab list.
1734 	 */
1735 	if (slab_list_specified) {
1736 		if (!global_slub_debug_changed)
1737 			global_flags = slub_debug;
1738 		slub_debug_string = saved_str;
1739 	}
1740 out:
1741 	slub_debug = global_flags;
1742 	if (slub_debug & SLAB_STORE_USER)
1743 		stack_depot_request_early_init();
1744 	if (slub_debug != 0 || slub_debug_string)
1745 		static_branch_enable(&slub_debug_enabled);
1746 	else
1747 		static_branch_disable(&slub_debug_enabled);
1748 	if ((static_branch_unlikely(&init_on_alloc) ||
1749 	     static_branch_unlikely(&init_on_free)) &&
1750 	    (slub_debug & SLAB_POISON))
1751 		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1752 	return 1;
1753 }
1754 
1755 __setup("slab_debug", setup_slub_debug);
1756 __setup_param("slub_debug", slub_debug, setup_slub_debug, 0);
1757 
1758 /*
1759  * kmem_cache_flags - apply debugging options to the cache
1760  * @flags:		flags to set
1761  * @name:		name of the cache
1762  *
1763  * Debug option(s) are applied to @flags. In addition to the debug
1764  * option(s), if a slab name (or multiple) is specified i.e.
1765  * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1766  * then only the selected slabs will receive the debug option(s).
1767  */
1768 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1769 {
1770 	char *iter;
1771 	size_t len;
1772 	char *next_block;
1773 	slab_flags_t block_flags;
1774 	slab_flags_t slub_debug_local = slub_debug;
1775 
1776 	if (flags & SLAB_NO_USER_FLAGS)
1777 		return flags;
1778 
1779 	/*
1780 	 * If the slab cache is for debugging (e.g. kmemleak) then
1781 	 * don't store user (stack trace) information by default,
1782 	 * but let the user enable it via the command line below.
1783 	 */
1784 	if (flags & SLAB_NOLEAKTRACE)
1785 		slub_debug_local &= ~SLAB_STORE_USER;
1786 
1787 	len = strlen(name);
1788 	next_block = slub_debug_string;
1789 	/* Go through all blocks of debug options, see if any matches our slab's name */
1790 	while (next_block) {
1791 		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1792 		if (!iter)
1793 			continue;
1794 		/* Found a block that has a slab list, search it */
1795 		while (*iter) {
1796 			char *end, *glob;
1797 			size_t cmplen;
1798 
1799 			end = strchrnul(iter, ',');
1800 			if (next_block && next_block < end)
1801 				end = next_block - 1;
1802 
1803 			glob = strnchr(iter, end - iter, '*');
1804 			if (glob)
1805 				cmplen = glob - iter;
1806 			else
1807 				cmplen = max_t(size_t, len, (end - iter));
1808 
1809 			if (!strncmp(name, iter, cmplen)) {
1810 				flags |= block_flags;
1811 				return flags;
1812 			}
1813 
1814 			if (!*end || *end == ';')
1815 				break;
1816 			iter = end + 1;
1817 		}
1818 	}
1819 
1820 	return flags | slub_debug_local;
1821 }
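
/*
 * Illustration of the matching above (a sketch, not from the source): with
 * "slab_debug=U,kmalloc-*" the '*' glob makes cmplen the length of "kmalloc-",
 * so any cache whose name starts with "kmalloc-" (kmalloc-64, kmalloc-1k, ...)
 * has SLAB_STORE_USER added to its flags, while a cache matching no block gets
 * flags | slub_debug as usual.
 */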
1822 #else /* !CONFIG_SLUB_DEBUG */
1823 static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
1824 static inline
1825 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
1826 
1827 static inline bool alloc_debug_processing(struct kmem_cache *s,
1828 	struct slab *slab, void *object, int orig_size) { return true; }
1829 
1830 static inline bool free_debug_processing(struct kmem_cache *s,
1831 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
1832 	unsigned long addr, depot_stack_handle_t handle) { return true; }
1833 
1834 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
1835 static inline int check_object(struct kmem_cache *s, struct slab *slab,
1836 			void *object, u8 val) { return 1; }
1837 static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
1838 static inline void set_track(struct kmem_cache *s, void *object,
1839 			     enum track_item alloc, unsigned long addr) {}
1840 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1841 					struct slab *slab) {}
1842 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1843 					struct slab *slab) {}
1844 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1845 {
1846 	return flags;
1847 }
1848 #define slub_debug 0
1849 
1850 #define disable_higher_order_debug 0
1851 
1852 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1853 							{ return 0; }
1854 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1855 							int objects) {}
1856 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1857 							int objects) {}
1858 
1859 #ifndef CONFIG_SLUB_TINY
1860 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1861 			       void **freelist, void *nextfree)
1862 {
1863 	return false;
1864 }
1865 #endif
1866 #endif /* CONFIG_SLUB_DEBUG */
1867 
1868 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
1869 {
1870 	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1871 		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
1872 }
1873 
1874 #ifdef CONFIG_MEMCG_KMEM
1875 static inline void memcg_free_slab_cgroups(struct slab *slab)
1876 {
1877 	kfree(slab_objcgs(slab));
1878 	slab->memcg_data = 0;
1879 }
1880 
1881 static inline size_t obj_full_size(struct kmem_cache *s)
1882 {
1883 	/*
1884 	 * For each accounted object there is an extra space which is used
1885 	 * to store obj_cgroup membership. Charge it too.
1886 	 */
1887 	return s->size + sizeof(struct obj_cgroup *);
1888 }
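
/*
 * Worked example (illustrative): on a 64-bit kernel, a cache with s->size of
 * 192 bytes is charged 192 + sizeof(struct obj_cgroup *) = 200 bytes per
 * accounted object, so charging a bulk allocation of 16 objects accounts
 * 3200 bytes to the objcg.
 */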
1889 
1890 /*
1891  * Returns false if the allocation should fail.
1892  */
1893 static bool __memcg_slab_pre_alloc_hook(struct kmem_cache *s,
1894 					struct list_lru *lru,
1895 					struct obj_cgroup **objcgp,
1896 					size_t objects, gfp_t flags)
1897 {
1898 	/*
1899 	 * The obtained objcg pointer is safe to use within the current scope,
1900 	 * defined by current task or set_active_memcg() pair.
1901 	 * obj_cgroup_get() is used to get a permanent reference.
1902 	 */
1903 	struct obj_cgroup *objcg = current_obj_cgroup();
1904 	if (!objcg)
1905 		return true;
1906 
1907 	if (lru) {
1908 		int ret;
1909 		struct mem_cgroup *memcg;
1910 
1911 		memcg = get_mem_cgroup_from_objcg(objcg);
1912 		ret = memcg_list_lru_alloc(memcg, lru, flags);
1913 		css_put(&memcg->css);
1914 
1915 		if (ret)
1916 			return false;
1917 	}
1918 
1919 	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
1920 		return false;
1921 
1922 	*objcgp = objcg;
1923 	return true;
1924 }
1925 
1926 /*
1927  * Returns false if the allocation should fail.
1928  */
1929 static __fastpath_inline
1930 bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
1931 			       struct obj_cgroup **objcgp, size_t objects,
1932 			       gfp_t flags)
1933 {
1934 	if (!memcg_kmem_online())
1935 		return true;
1936 
1937 	if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
1938 		return true;
1939 
1940 	return likely(__memcg_slab_pre_alloc_hook(s, lru, objcgp, objects,
1941 						  flags));
1942 }
1943 
1944 static void __memcg_slab_post_alloc_hook(struct kmem_cache *s,
1945 					 struct obj_cgroup *objcg,
1946 					 gfp_t flags, size_t size,
1947 					 void **p)
1948 {
1949 	struct slab *slab;
1950 	unsigned long off;
1951 	size_t i;
1952 
1953 	flags &= gfp_allowed_mask;
1954 
1955 	for (i = 0; i < size; i++) {
1956 		if (likely(p[i])) {
1957 			slab = virt_to_slab(p[i]);
1958 
1959 			if (!slab_objcgs(slab) &&
1960 			    memcg_alloc_slab_cgroups(slab, s, flags, false)) {
1961 				obj_cgroup_uncharge(objcg, obj_full_size(s));
1962 				continue;
1963 			}
1964 
1965 			off = obj_to_index(s, slab, p[i]);
1966 			obj_cgroup_get(objcg);
1967 			slab_objcgs(slab)[off] = objcg;
1968 			mod_objcg_state(objcg, slab_pgdat(slab),
1969 					cache_vmstat_idx(s), obj_full_size(s));
1970 		} else {
1971 			obj_cgroup_uncharge(objcg, obj_full_size(s));
1972 		}
1973 	}
1974 }
1975 
1976 static __fastpath_inline
1977 void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
1978 				gfp_t flags, size_t size, void **p)
1979 {
1980 	if (likely(!memcg_kmem_online() || !objcg))
1981 		return;
1982 
1983 	return __memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
1984 }
1985 
1986 static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
1987 				   void **p, int objects,
1988 				   struct obj_cgroup **objcgs)
1989 {
1990 	for (int i = 0; i < objects; i++) {
1991 		struct obj_cgroup *objcg;
1992 		unsigned int off;
1993 
1994 		off = obj_to_index(s, slab, p[i]);
1995 		objcg = objcgs[off];
1996 		if (!objcg)
1997 			continue;
1998 
1999 		objcgs[off] = NULL;
2000 		obj_cgroup_uncharge(objcg, obj_full_size(s));
2001 		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
2002 				-obj_full_size(s));
2003 		obj_cgroup_put(objcg);
2004 	}
2005 }
2006 
2007 static __fastpath_inline
2008 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2009 			  int objects)
2010 {
2011 	struct obj_cgroup **objcgs;
2012 
2013 	if (!memcg_kmem_online())
2014 		return;
2015 
2016 	objcgs = slab_objcgs(slab);
2017 	if (likely(!objcgs))
2018 		return;
2019 
2020 	__memcg_slab_free_hook(s, slab, p, objects, objcgs);
2021 }
2022 
2023 static inline
2024 void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
2025 			   struct obj_cgroup *objcg)
2026 {
2027 	if (objcg)
2028 		obj_cgroup_uncharge(objcg, objects * obj_full_size(s));
2029 }
2030 #else /* CONFIG_MEMCG_KMEM */
2031 static inline void memcg_free_slab_cgroups(struct slab *slab)
2032 {
2033 }
2034 
2035 static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
2036 					     struct list_lru *lru,
2037 					     struct obj_cgroup **objcgp,
2038 					     size_t objects, gfp_t flags)
2039 {
2040 	return true;
2041 }
2042 
2043 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
2044 					      struct obj_cgroup *objcg,
2045 					      gfp_t flags, size_t size,
2046 					      void **p)
2047 {
2048 }
2049 
2050 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2051 					void **p, int objects)
2052 {
2053 }
2054 
2055 static inline
2056 void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
2057 				 struct obj_cgroup *objcg)
2058 {
2059 }
2060 #endif /* CONFIG_MEMCG_KMEM */
2061 
2062 /*
2063  * Hooks for other subsystems that check memory allocations. In a typical
2064  * production configuration these hooks should all produce no code at all.
2065  *
2066  * Returns true if freeing of the object can proceed, false if its reuse
2067  * was delayed by KASAN quarantine, or it was returned to KFENCE.
2068  */
2069 static __always_inline
2070 bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
2071 {
2072 	kmemleak_free_recursive(x, s->flags);
2073 	kmsan_slab_free(s, x);
2074 
2075 	debug_check_no_locks_freed(x, s->object_size);
2076 
2077 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
2078 		debug_check_no_obj_freed(x, s->object_size);
2079 
2080 	/* Use KCSAN to help debug racy use-after-free. */
2081 	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
2082 		__kcsan_check_access(x, s->object_size,
2083 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
2084 
2085 	if (kfence_free(x))
2086 		return false;
2087 
2088 	/*
2089 	 * As memory initialization might be integrated into KASAN,
2090 	 * kasan_slab_free and initialization memset's must be
2091 	 * kept together to avoid discrepancies in behavior.
2092 	 *
2093 	 * The initialization memset's clear the object and the metadata,
2094 	 * but don't touch the SLAB redzone.
2095 	 *
2096 	 * The object's freepointer is also avoided if stored outside the
2097 	 * object.
2098 	 */
2099 	if (unlikely(init)) {
2100 		int rsize;
2101 		unsigned int inuse;
2102 
2103 		inuse = get_info_end(s);
2104 		if (!kasan_has_integrated_init())
2105 			memset(kasan_reset_tag(x), 0, s->object_size);
2106 		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2107 		memset((char *)kasan_reset_tag(x) + inuse, 0,
2108 		       s->size - inuse - rsize);
2109 	}
2110 	/* KASAN might put x into memory quarantine, delaying its reuse. */
2111 	return !kasan_slab_free(s, x, init);
2112 }
2113 
2114 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
2115 					   void **head, void **tail,
2116 					   int *cnt)
2117 {
2119 	void *object;
2120 	void *next = *head;
2121 	void *old_tail = *tail;
2122 	bool init;
2123 
2124 	if (is_kfence_address(next)) {
2125 		slab_free_hook(s, next, false);
2126 		return false;
2127 	}
2128 
2129 	/* Head and tail of the reconstructed freelist */
2130 	*head = NULL;
2131 	*tail = NULL;
2132 
2133 	init = slab_want_init_on_free(s);
2134 
2135 	do {
2136 		object = next;
2137 		next = get_freepointer(s, object);
2138 
2139 		/* If object's reuse doesn't have to be delayed */
2140 		if (likely(slab_free_hook(s, object, init))) {
2141 			/* Move object to the new freelist */
2142 			set_freepointer(s, object, *head);
2143 			*head = object;
2144 			if (!*tail)
2145 				*tail = object;
2146 		} else {
2147 			/*
2148 			 * Adjust the reconstructed freelist depth
2149 			 * accordingly if object's reuse is delayed.
2150 			 */
2151 			--(*cnt);
2152 		}
2153 	} while (object != old_tail);
2154 
2155 	return *head != NULL;
2156 }
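
/*
 * Illustration (a sketch, not from the source): freeing the detached list
 * A -> B -> C with *cnt == 3, where B is captured by the KASAN quarantine,
 * rebuilds the freelist as C -> A (the surviving objects are relinked in
 * reverse traversal order), sets *head = C, *tail = A and drops *cnt to 2,
 * so the caller returns only the two surviving objects to the slab.
 */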
2157 
2158 static void *setup_object(struct kmem_cache *s, void *object)
2159 {
2160 	setup_object_debug(s, object);
2161 	object = kasan_init_slab_obj(s, object);
2162 	if (unlikely(s->ctor)) {
2163 		kasan_unpoison_new_object(s, object);
2164 		s->ctor(object);
2165 		kasan_poison_new_object(s, object);
2166 	}
2167 	return object;
2168 }
2169 
2170 /*
2171  * Slab allocation and freeing
2172  */
2173 static inline struct slab *alloc_slab_page(gfp_t flags, int node,
2174 		struct kmem_cache_order_objects oo)
2175 {
2176 	struct folio *folio;
2177 	struct slab *slab;
2178 	unsigned int order = oo_order(oo);
2179 
2180 	folio = (struct folio *)alloc_pages_node(node, flags, order);
2181 	if (!folio)
2182 		return NULL;
2183 
2184 	slab = folio_slab(folio);
2185 	__folio_set_slab(folio);
2186 	/* Make the flag visible before any changes to folio->mapping */
2187 	smp_wmb();
2188 	if (folio_is_pfmemalloc(folio))
2189 		slab_set_pfmemalloc(slab);
2190 
2191 	return slab;
2192 }
2193 
2194 #ifdef CONFIG_SLAB_FREELIST_RANDOM
2195 /* Pre-initialize the random sequence cache */
2196 static int init_cache_random_seq(struct kmem_cache *s)
2197 {
2198 	unsigned int count = oo_objects(s->oo);
2199 	int err;
2200 
2201 	/* Bailout if already initialised */
2202 	if (s->random_seq)
2203 		return 0;
2204 
2205 	err = cache_random_seq_create(s, count, GFP_KERNEL);
2206 	if (err) {
2207 		pr_err("SLUB: Unable to initialize free list for %s\n",
2208 			s->name);
2209 		return err;
2210 	}
2211 
2212 	/* Transform to an offset on the set of pages */
2213 	if (s->random_seq) {
2214 		unsigned int i;
2215 
2216 		for (i = 0; i < count; i++)
2217 			s->random_seq[i] *= s->size;
2218 	}
2219 	return 0;
2220 }
2221 
2222 /* Initialize each random sequence freelist per cache */
2223 static void __init init_freelist_randomization(void)
2224 {
2225 	struct kmem_cache *s;
2226 
2227 	mutex_lock(&slab_mutex);
2228 
2229 	list_for_each_entry(s, &slab_caches, list)
2230 		init_cache_random_seq(s);
2231 
2232 	mutex_unlock(&slab_mutex);
2233 }
2234 
2235 /* Get the next entry from the pre-computed randomized freelist sequence */
2236 static void *next_freelist_entry(struct kmem_cache *s,
2237 				unsigned long *pos, void *start,
2238 				unsigned long page_limit,
2239 				unsigned long freelist_count)
2240 {
2241 	unsigned int idx;
2242 
2243 	/*
2244 	 * If the target page allocation failed, the number of objects on the
2245 	 * page might be smaller than the usual size defined by the cache.
2246 	 */
2247 	do {
2248 		idx = s->random_seq[*pos];
2249 		*pos += 1;
2250 		if (*pos >= freelist_count)
2251 			*pos = 0;
2252 	} while (unlikely(idx >= page_limit));
2253 
2254 	return (char *)start + idx;
2255 }
2256 
2257 /* Shuffle the singly linked freelist based on a random pre-computed sequence */
2258 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2259 {
2260 	void *start;
2261 	void *cur;
2262 	void *next;
2263 	unsigned long idx, pos, page_limit, freelist_count;
2264 
2265 	if (slab->objects < 2 || !s->random_seq)
2266 		return false;
2267 
2268 	freelist_count = oo_objects(s->oo);
2269 	pos = get_random_u32_below(freelist_count);
2270 
2271 	page_limit = slab->objects * s->size;
2272 	start = fixup_red_left(s, slab_address(slab));
2273 
2274 	/* First entry is used as the base of the freelist */
2275 	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
2276 	cur = setup_object(s, cur);
2277 	slab->freelist = cur;
2278 
2279 	for (idx = 1; idx < slab->objects; idx++) {
2280 		next = next_freelist_entry(s, &pos, start, page_limit,
2281 			freelist_count);
2282 		next = setup_object(s, next);
2283 		set_freepointer(s, cur, next);
2284 		cur = next;
2285 	}
2286 	set_freepointer(s, cur, NULL);
2287 
2288 	return true;
2289 }
2290 #else
2291 static inline int init_cache_random_seq(struct kmem_cache *s)
2292 {
2293 	return 0;
2294 }
2295 static inline void init_freelist_randomization(void) { }
2296 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2297 {
2298 	return false;
2299 }
2300 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
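
/*
 * Worked example (illustrative): for a cache with s->size == 64 and a slab
 * holding four objects, a pre-computed sequence {2, 0, 3, 1} is scaled by
 * init_cache_random_seq() to the byte offsets {128, 0, 192, 64}.
 * shuffle_freelist() then starts at a random position in that sequence and
 * links the objects in that order, so a fresh slab's freelist is no longer a
 * simple address-ascending chain.
 */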
2301 
2302 static __always_inline void account_slab(struct slab *slab, int order,
2303 					 struct kmem_cache *s, gfp_t gfp)
2304 {
2305 	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
2306 		memcg_alloc_slab_cgroups(slab, s, gfp, true);
2307 
2308 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2309 			    PAGE_SIZE << order);
2310 }
2311 
2312 static __always_inline void unaccount_slab(struct slab *slab, int order,
2313 					   struct kmem_cache *s)
2314 {
2315 	if (memcg_kmem_online())
2316 		memcg_free_slab_cgroups(slab);
2317 
2318 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2319 			    -(PAGE_SIZE << order));
2320 }
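
/*
 * Worked example (illustrative): with 4 KiB pages, accounting an order-2 slab
 * adjusts the node's NR_SLAB_RECLAIMABLE_B or NR_SLAB_UNRECLAIMABLE_B counter
 * by PAGE_SIZE << 2 = 16384 bytes; unaccount_slab() subtracts the same amount
 * when the slab is freed.
 */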
2321 
2322 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
2323 {
2324 	struct slab *slab;
2325 	struct kmem_cache_order_objects oo = s->oo;
2326 	gfp_t alloc_gfp;
2327 	void *start, *p, *next;
2328 	int idx;
2329 	bool shuffle;
2330 
2331 	flags &= gfp_allowed_mask;
2332 
2333 	flags |= s->allocflags;
2334 
2335 	/*
2336 	 * Let the initial higher-order allocation fail under memory pressure
2337 	 * so we fall back to the minimum order allocation.
2338 	 */
2339 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
2340 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
2341 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
2342 
2343 	slab = alloc_slab_page(alloc_gfp, node, oo);
2344 	if (unlikely(!slab)) {
2345 		oo = s->min;
2346 		alloc_gfp = flags;
2347 		/*
2348 		 * Allocation may have failed due to fragmentation.
2349 		 * Try a lower order alloc if possible
2350 		 */
2351 		slab = alloc_slab_page(alloc_gfp, node, oo);
2352 		if (unlikely(!slab))
2353 			return NULL;
2354 		stat(s, ORDER_FALLBACK);
2355 	}
2356 
2357 	slab->objects = oo_objects(oo);
2358 	slab->inuse = 0;
2359 	slab->frozen = 0;
2360 
2361 	account_slab(slab, oo_order(oo), s, flags);
2362 
2363 	slab->slab_cache = s;
2364 
2365 	kasan_poison_slab(slab);
2366 
2367 	start = slab_address(slab);
2368 
2369 	setup_slab_debug(s, slab, start);
2370 
2371 	shuffle = shuffle_freelist(s, slab);
2372 
2373 	if (!shuffle) {
2374 		start = fixup_red_left(s, start);
2375 		start = setup_object(s, start);
2376 		slab->freelist = start;
2377 		for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
2378 			next = p + s->size;
2379 			next = setup_object(s, next);
2380 			set_freepointer(s, p, next);
2381 			p = next;
2382 		}
2383 		set_freepointer(s, p, NULL);
2384 	}
2385 
2386 	return slab;
2387 }
2388 
2389 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
2390 {
2391 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
2392 		flags = kmalloc_fix_flags(flags);
2393 
2394 	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2395 
2396 	return allocate_slab(s,
2397 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
2398 }
2399 
2400 static void __free_slab(struct kmem_cache *s, struct slab *slab)
2401 {
2402 	struct folio *folio = slab_folio(slab);
2403 	int order = folio_order(folio);
2404 	int pages = 1 << order;
2405 
2406 	__slab_clear_pfmemalloc(slab);
2407 	folio->mapping = NULL;
2408 	/* Make the mapping reset visible before clearing the flag */
2409 	smp_wmb();
2410 	__folio_clear_slab(folio);
2411 	mm_account_reclaimed_pages(pages);
2412 	unaccount_slab(slab, order, s);
2413 	__free_pages(&folio->page, order);
2414 }
2415 
2416 static void rcu_free_slab(struct rcu_head *h)
2417 {
2418 	struct slab *slab = container_of(h, struct slab, rcu_head);
2419 
2420 	__free_slab(slab->slab_cache, slab);
2421 }
2422 
2423 static void free_slab(struct kmem_cache *s, struct slab *slab)
2424 {
2425 	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
2426 		void *p;
2427 
2428 		slab_pad_check(s, slab);
2429 		for_each_object(p, s, slab_address(slab), slab->objects)
2430 			check_object(s, slab, p, SLUB_RED_INACTIVE);
2431 	}
2432 
2433 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
2434 		call_rcu(&slab->rcu_head, rcu_free_slab);
2435 	else
2436 		__free_slab(s, slab);
2437 }
2438 
2439 static void discard_slab(struct kmem_cache *s, struct slab *slab)
2440 {
2441 	dec_slabs_node(s, slab_nid(slab), slab->objects);
2442 	free_slab(s, slab);
2443 }
2444 
2445 /*
2446  * SLUB reuses the PG_workingset bit to keep track of whether a slab is on
2447  * the per-node partial list.
2448  */
2449 static inline bool slab_test_node_partial(const struct slab *slab)
2450 {
2451 	return folio_test_workingset((struct folio *)slab_folio(slab));
2452 }
2453 
2454 static inline void slab_set_node_partial(struct slab *slab)
2455 {
2456 	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2457 }
2458 
2459 static inline void slab_clear_node_partial(struct slab *slab)
2460 {
2461 	clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2462 }
2463 
2464 /*
2465  * Management of partially allocated slabs.
2466  */
2467 static inline void
2468 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
2469 {
2470 	n->nr_partial++;
2471 	if (tail == DEACTIVATE_TO_TAIL)
2472 		list_add_tail(&slab->slab_list, &n->partial);
2473 	else
2474 		list_add(&slab->slab_list, &n->partial);
2475 	slab_set_node_partial(slab);
2476 }
2477 
2478 static inline void add_partial(struct kmem_cache_node *n,
2479 				struct slab *slab, int tail)
2480 {
2481 	lockdep_assert_held(&n->list_lock);
2482 	__add_partial(n, slab, tail);
2483 }
2484 
2485 static inline void remove_partial(struct kmem_cache_node *n,
2486 					struct slab *slab)
2487 {
2488 	lockdep_assert_held(&n->list_lock);
2489 	list_del(&slab->slab_list);
2490 	slab_clear_node_partial(slab);
2491 	n->nr_partial--;
2492 }
2493 
2494 /*
2495  * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
2496  * slab from the n->partial list. Remove only a single object from the slab, do
2497  * the alloc_debug_processing() checks and leave the slab on the list, or move
2498  * it to the full list if it was the last free object.
2499  */
2500 static void *alloc_single_from_partial(struct kmem_cache *s,
2501 		struct kmem_cache_node *n, struct slab *slab, int orig_size)
2502 {
2503 	void *object;
2504 
2505 	lockdep_assert_held(&n->list_lock);
2506 
2507 	object = slab->freelist;
2508 	slab->freelist = get_freepointer(s, object);
2509 	slab->inuse++;
2510 
2511 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
2512 		remove_partial(n, slab);
2513 		return NULL;
2514 	}
2515 
2516 	if (slab->inuse == slab->objects) {
2517 		remove_partial(n, slab);
2518 		add_full(s, n, slab);
2519 	}
2520 
2521 	return object;
2522 }
2523 
2524 /*
2525  * Called only for kmem_cache_debug() caches to allocate from a freshly
2526  * allocated slab. Allocate a single object instead of whole freelist
2527  * and put the slab to the partial (or full) list.
2528  */
2529 static void *alloc_single_from_new_slab(struct kmem_cache *s,
2530 					struct slab *slab, int orig_size)
2531 {
2532 	int nid = slab_nid(slab);
2533 	struct kmem_cache_node *n = get_node(s, nid);
2534 	unsigned long flags;
2535 	void *object;
2536 
2537 
2539 	slab->freelist = get_freepointer(s, object);
2540 	slab->inuse = 1;
2541 
2542 	if (!alloc_debug_processing(s, slab, object, orig_size))
2543 		/*
2544 		 * It's not really expected that this would fail on a
2545 		 * freshly allocated slab, but a concurrent memory
2546 		 * corruption in theory could cause that.
2547 		 */
2548 		return NULL;
2549 
2550 	spin_lock_irqsave(&n->list_lock, flags);
2551 
2552 	if (slab->inuse == slab->objects)
2553 		add_full(s, n, slab);
2554 	else
2555 		add_partial(n, slab, DEACTIVATE_TO_HEAD);
2556 
2557 	inc_slabs_node(s, nid, slab->objects);
2558 	spin_unlock_irqrestore(&n->list_lock, flags);
2559 
2560 	return object;
2561 }
2562 
2563 #ifdef CONFIG_SLUB_CPU_PARTIAL
2564 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
2565 #else
2566 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
2567 				   int drain) { }
2568 #endif
2569 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
2570 
2571 /*
2572  * Try to allocate a partial slab from a specific node.
2573  */
2574 static struct slab *get_partial_node(struct kmem_cache *s,
2575 				     struct kmem_cache_node *n,
2576 				     struct partial_context *pc)
2577 {
2578 	struct slab *slab, *slab2, *partial = NULL;
2579 	unsigned long flags;
2580 	unsigned int partial_slabs = 0;
2581 
2582 	/*
2583 	 * Racy check. If we mistakenly see no partial slabs then we
2584 	 * just allocate an empty slab. If we mistakenly try to get a
2585 	 * partial slab and there is none available then get_partial()
2586 	 * will return NULL.
2587 	 */
2588 	if (!n || !n->nr_partial)
2589 		return NULL;
2590 
2591 	spin_lock_irqsave(&n->list_lock, flags);
2592 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
2593 		if (!pfmemalloc_match(slab, pc->flags))
2594 			continue;
2595 
2596 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
2597 			void *object = alloc_single_from_partial(s, n, slab,
2598 							pc->orig_size);
2599 			if (object) {
2600 				partial = slab;
2601 				pc->object = object;
2602 				break;
2603 			}
2604 			continue;
2605 		}
2606 
2607 		remove_partial(n, slab);
2608 
2609 		if (!partial) {
2610 			partial = slab;
2611 			stat(s, ALLOC_FROM_PARTIAL);
2612 		} else {
2613 			put_cpu_partial(s, slab, 0);
2614 			stat(s, CPU_PARTIAL_NODE);
2615 			partial_slabs++;
2616 		}
2617 #ifdef CONFIG_SLUB_CPU_PARTIAL
2618 		if (!kmem_cache_has_cpu_partial(s)
2619 			|| partial_slabs > s->cpu_partial_slabs / 2)
2620 			break;
2621 #else
2622 		break;
2623 #endif
2624 
2625 	}
2626 	spin_unlock_irqrestore(&n->list_lock, flags);
2627 	return partial;
2628 }
2629 
2630 /*
2631  * Get a slab from somewhere. Search in increasing NUMA distances.
2632  */
2633 static struct slab *get_any_partial(struct kmem_cache *s,
2634 				    struct partial_context *pc)
2635 {
2636 #ifdef CONFIG_NUMA
2637 	struct zonelist *zonelist;
2638 	struct zoneref *z;
2639 	struct zone *zone;
2640 	enum zone_type highest_zoneidx = gfp_zone(pc->flags);
2641 	struct slab *slab;
2642 	unsigned int cpuset_mems_cookie;
2643 
2644 	/*
2645 	 * The defrag ratio allows a configuration of the tradeoffs between
2646 	 * inter node defragmentation and node local allocations. A lower
2647 	 * defrag_ratio increases the tendency to do local allocations
2648 	 * instead of attempting to obtain partial slabs from other nodes.
2649 	 *
2650 	 * If the defrag_ratio is set to 0 then kmalloc() always
2651 	 * returns node local objects. If the ratio is higher then kmalloc()
2652 	 * may return off node objects because partial slabs are obtained
2653 	 * from other nodes and filled up.
2654 	 *
2655 	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2656 	 * (which makes defrag_ratio = 1000) then every (well almost)
2657 	 * allocation will first attempt to defrag slab caches on other nodes.
2658 	 * This means scanning over all nodes to look for partial slabs which
2659 	 * may be expensive if we do it every time we are trying to find a slab
2660 	 * with available objects.
2661 	 */
2662 	if (!s->remote_node_defrag_ratio ||
2663 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
2664 		return NULL;
2665 
2666 	do {
2667 		cpuset_mems_cookie = read_mems_allowed_begin();
2668 		zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
2669 		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
2670 			struct kmem_cache_node *n;
2671 
2672 			n = get_node(s, zone_to_nid(zone));
2673 
2674 			if (n && cpuset_zone_allowed(zone, pc->flags) &&
2675 					n->nr_partial > s->min_partial) {
2676 				slab = get_partial_node(s, n, pc);
2677 				if (slab) {
2678 					/*
2679 					 * Don't check read_mems_allowed_retry()
2680 					 * here - if mems_allowed was updated in
2681 					 * parallel, that was a harmless race
2682 					 * between allocation and the cpuset
2683 					 * update
2684 					 */
2685 					return slab;
2686 				}
2687 			}
2688 		}
2689 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
2690 #endif	/* CONFIG_NUMA */
2691 	return NULL;
2692 }
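
/*
 * Worked example for the ratio check above (illustrative): writing 20 to
 * /sys/kernel/slab/<cache>/remote_node_defrag_ratio stores a defrag_ratio of
 * 200, so only samples of get_cycles() % 1024 up to 200 pass the check and
 * roughly 20% of slow-path allocations will go looking for partial slabs on
 * remote nodes.
 */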
2693 
2694 /*
2695  * Get a partial slab, lock it and return it.
2696  */
2697 static struct slab *get_partial(struct kmem_cache *s, int node,
2698 				struct partial_context *pc)
2699 {
2700 	struct slab *slab;
2701 	int searchnode = node;
2702 
2703 	if (node == NUMA_NO_NODE)
2704 		searchnode = numa_mem_id();
2705 
2706 	slab = get_partial_node(s, get_node(s, searchnode), pc);
2707 	if (slab || node != NUMA_NO_NODE)
2708 		return slab;
2709 
2710 	return get_any_partial(s, pc);
2711 }
2712 
2713 #ifndef CONFIG_SLUB_TINY
2714 
2715 #ifdef CONFIG_PREEMPTION
2716 /*
2717  * Calculate the next globally unique transaction for disambiguation
2718  * during cmpxchg. The transactions start with the cpu number and are then
2719  * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
2720  */
2721 #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
2722 #else
2723 /*
2724  * No preemption is supported, therefore there is also no need to check
2725  * for different cpus.
2726  */
2727 #define TID_STEP 1
2728 #endif /* CONFIG_PREEMPTION */
2729 
2730 static inline unsigned long next_tid(unsigned long tid)
2731 {
2732 	return tid + TID_STEP;
2733 }
2734 
2735 #ifdef SLUB_DEBUG_CMPXCHG
2736 static inline unsigned int tid_to_cpu(unsigned long tid)
2737 {
2738 	return tid % TID_STEP;
2739 }
2740 
2741 static inline unsigned long tid_to_event(unsigned long tid)
2742 {
2743 	return tid / TID_STEP;
2744 }
2745 #endif
2746 
2747 static inline unsigned int init_tid(int cpu)
2748 {
2749 	return cpu;
2750 }
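
/*
 * Worked example (illustrative): with CONFIG_NR_CPUS == 4, TID_STEP is 4.
 * CPU 2 starts at tid 2 (init_tid) and advances 2 -> 6 -> 10 -> ... via
 * next_tid(). tid % TID_STEP recovers the cpu (2) and tid / TID_STEP the
 * number of operations performed, which is how the SLUB_DEBUG_CMPXCHG helpers
 * above distinguish a cpu migration from a plain concurrent operation.
 */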
2751 
2752 static inline void note_cmpxchg_failure(const char *n,
2753 		const struct kmem_cache *s, unsigned long tid)
2754 {
2755 #ifdef SLUB_DEBUG_CMPXCHG
2756 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2757 
2758 	pr_info("%s %s: cmpxchg redo ", n, s->name);
2759 
2760 #ifdef CONFIG_PREEMPTION
2761 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2762 		pr_warn("due to cpu change %d -> %d\n",
2763 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
2764 	else
2765 #endif
2766 	if (tid_to_event(tid) != tid_to_event(actual_tid))
2767 		pr_warn("due to cpu running other code. Event %ld->%ld\n",
2768 			tid_to_event(tid), tid_to_event(actual_tid));
2769 	else
2770 		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2771 			actual_tid, tid, next_tid(tid));
2772 #endif
2773 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2774 }
2775 
2776 static void init_kmem_cache_cpus(struct kmem_cache *s)
2777 {
2778 	int cpu;
2779 	struct kmem_cache_cpu *c;
2780 
2781 	for_each_possible_cpu(cpu) {
2782 		c = per_cpu_ptr(s->cpu_slab, cpu);
2783 		local_lock_init(&c->lock);
2784 		c->tid = init_tid(cpu);
2785 	}
2786 }
2787 
2788 /*
2789  * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
2790  * unfreezes the slab and puts it on the proper list.
2791  * Assumes the slab has already been safely taken away from kmem_cache_cpu
2792  * by the caller.
2793  */
2794 static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
2795 			    void *freelist)
2796 {
2797 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
2798 	int free_delta = 0;
2799 	void *nextfree, *freelist_iter, *freelist_tail;
2800 	int tail = DEACTIVATE_TO_HEAD;
2801 	unsigned long flags = 0;
2802 	struct slab new;
2803 	struct slab old;
2804 
2805 	if (slab->freelist) {
2806 		stat(s, DEACTIVATE_REMOTE_FREES);
2807 		tail = DEACTIVATE_TO_TAIL;
2808 	}
2809 
2810 	/*
2811 	 * Stage one: Count the objects on cpu's freelist as free_delta and
2812 	 * remember the last object in freelist_tail for later splicing.
2813 	 */
2814 	freelist_tail = NULL;
2815 	freelist_iter = freelist;
2816 	while (freelist_iter) {
2817 		nextfree = get_freepointer(s, freelist_iter);
2818 
2819 		/*
2820 		 * If 'nextfree' is invalid, it is possible that the object at
2821 		 * 'freelist_iter' is already corrupted.  So isolate all objects
2822 		 * starting at 'freelist_iter' by skipping them.
2823 		 */
2824 		if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
2825 			break;
2826 
2827 		freelist_tail = freelist_iter;
2828 		free_delta++;
2829 
2830 		freelist_iter = nextfree;
2831 	}
2832 
2833 	/*
2834 	 * Stage two: Unfreeze the slab while splicing the per-cpu
2835 	 * freelist to the head of slab's freelist.
2836 	 */
2837 	do {
2838 		old.freelist = READ_ONCE(slab->freelist);
2839 		old.counters = READ_ONCE(slab->counters);
2840 		VM_BUG_ON(!old.frozen);
2841 
2842 		/* Determine target state of the slab */
2843 		new.counters = old.counters;
2844 		new.frozen = 0;
2845 		if (freelist_tail) {
2846 			new.inuse -= free_delta;
2847 			set_freepointer(s, freelist_tail, old.freelist);
2848 			new.freelist = freelist;
2849 		} else {
2850 			new.freelist = old.freelist;
2851 		}
2852 	} while (!slab_update_freelist(s, slab,
2853 		old.freelist, old.counters,
2854 		new.freelist, new.counters,
2855 		"unfreezing slab"));
2856 
2857 	/*
2858 	 * Stage three: Manipulate the slab list based on the updated state.
2859 	 */
2860 	if (!new.inuse && n->nr_partial >= s->min_partial) {
2861 		stat(s, DEACTIVATE_EMPTY);
2862 		discard_slab(s, slab);
2863 		stat(s, FREE_SLAB);
2864 	} else if (new.freelist) {
2865 		spin_lock_irqsave(&n->list_lock, flags);
2866 		add_partial(n, slab, tail);
2867 		spin_unlock_irqrestore(&n->list_lock, flags);
2868 		stat(s, tail);
2869 	} else {
2870 		stat(s, DEACTIVATE_FULL);
2871 	}
2872 }
2873 
2874 #ifdef CONFIG_SLUB_CPU_PARTIAL
2875 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
2876 {
2877 	struct kmem_cache_node *n = NULL, *n2 = NULL;
2878 	struct slab *slab, *slab_to_discard = NULL;
2879 	unsigned long flags = 0;
2880 
2881 	while (partial_slab) {
2882 		slab = partial_slab;
2883 		partial_slab = slab->next;
2884 
2885 		n2 = get_node(s, slab_nid(slab));
2886 		if (n != n2) {
2887 			if (n)
2888 				spin_unlock_irqrestore(&n->list_lock, flags);
2889 
2890 			n = n2;
2891 			spin_lock_irqsave(&n->list_lock, flags);
2892 		}
2893 
2894 		if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) {
2895 			slab->next = slab_to_discard;
2896 			slab_to_discard = slab;
2897 		} else {
2898 			add_partial(n, slab, DEACTIVATE_TO_TAIL);
2899 			stat(s, FREE_ADD_PARTIAL);
2900 		}
2901 	}
2902 
2903 	if (n)
2904 		spin_unlock_irqrestore(&n->list_lock, flags);
2905 
2906 	while (slab_to_discard) {
2907 		slab = slab_to_discard;
2908 		slab_to_discard = slab_to_discard->next;
2909 
2910 		stat(s, DEACTIVATE_EMPTY);
2911 		discard_slab(s, slab);
2912 		stat(s, FREE_SLAB);
2913 	}
2914 }
2915 
2916 /*
2917  * Put all the cpu partial slabs onto the node partial list.
2918  */
2919 static void put_partials(struct kmem_cache *s)
2920 {
2921 	struct slab *partial_slab;
2922 	unsigned long flags;
2923 
2924 	local_lock_irqsave(&s->cpu_slab->lock, flags);
2925 	partial_slab = this_cpu_read(s->cpu_slab->partial);
2926 	this_cpu_write(s->cpu_slab->partial, NULL);
2927 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2928 
2929 	if (partial_slab)
2930 		__put_partials(s, partial_slab);
2931 }
2932 
2933 static void put_partials_cpu(struct kmem_cache *s,
2934 			     struct kmem_cache_cpu *c)
2935 {
2936 	struct slab *partial_slab;
2937 
2938 	partial_slab = slub_percpu_partial(c);
2939 	c->partial = NULL;
2940 
2941 	if (partial_slab)
2942 		__put_partials(s, partial_slab);
2943 }
2944 
2945 /*
2946  * Put a slab into a partial slab slot if available.
2947  *
2948  * If we did not find a slot then simply move all the partials to the
2949  * per node partial list.
2950  */
2951 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
2952 {
2953 	struct slab *oldslab;
2954 	struct slab *slab_to_put = NULL;
2955 	unsigned long flags;
2956 	int slabs = 0;
2957 
2958 	local_lock_irqsave(&s->cpu_slab->lock, flags);
2959 
2960 	oldslab = this_cpu_read(s->cpu_slab->partial);
2961 
2962 	if (oldslab) {
2963 		if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
2964 			/*
2965 			 * Partial array is full. Move the existing set to the
2966 			 * per node partial list. Postpone the actual unfreezing
2967 			 * outside of the critical section.
2968 			 */
2969 			slab_to_put = oldslab;
2970 			oldslab = NULL;
2971 		} else {
2972 			slabs = oldslab->slabs;
2973 		}
2974 	}
2975 
2976 	slabs++;
2977 
2978 	slab->slabs = slabs;
2979 	slab->next = oldslab;
2980 
2981 	this_cpu_write(s->cpu_slab->partial, slab);
2982 
2983 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2984 
2985 	if (slab_to_put) {
2986 		__put_partials(s, slab_to_put);
2987 		stat(s, CPU_PARTIAL_DRAIN);
2988 	}
2989 }
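
/*
 * Illustration (a sketch, not from the source): the cpu partial list is a
 * singly linked chain through slab->next, with the head's slab->slabs holding
 * its length. With s->cpu_partial_slabs == 2 and a full chain [S1, S2],
 * putting S3 with drain set hands S1 and S2 to __put_partials() (node partial
 * list, or discard if empty) and leaves [S3] as the new chain of length 1.
 */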
2990 
2991 #else	/* CONFIG_SLUB_CPU_PARTIAL */
2992 
2993 static inline void put_partials(struct kmem_cache *s) { }
2994 static inline void put_partials_cpu(struct kmem_cache *s,
2995 				    struct kmem_cache_cpu *c) { }
2996 
2997 #endif	/* CONFIG_SLUB_CPU_PARTIAL */
2998 
2999 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
3000 {
3001 	unsigned long flags;
3002 	struct slab *slab;
3003 	void *freelist;
3004 
3005 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3006 
3007 	slab = c->slab;
3008 	freelist = c->freelist;
3009 
3010 	c->slab = NULL;
3011 	c->freelist = NULL;
3012 	c->tid = next_tid(c->tid);
3013 
3014 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3015 
3016 	if (slab) {
3017 		deactivate_slab(s, slab, freelist);
3018 		stat(s, CPUSLAB_FLUSH);
3019 	}
3020 }
3021 
3022 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
3023 {
3024 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3025 	void *freelist = c->freelist;
3026 	struct slab *slab = c->slab;
3027 
3028 	c->slab = NULL;
3029 	c->freelist = NULL;
3030 	c->tid = next_tid(c->tid);
3031 
3032 	if (slab) {
3033 		deactivate_slab(s, slab, freelist);
3034 		stat(s, CPUSLAB_FLUSH);
3035 	}
3036 
3037 	put_partials_cpu(s, c);
3038 }
3039 
3040 struct slub_flush_work {
3041 	struct work_struct work;
3042 	struct kmem_cache *s;
3043 	bool skip;
3044 };
3045 
3046 /*
3047  * Flush cpu slab.
3048  *
3049  * Called from CPU work handler with migration disabled.
3050  */
3051 static void flush_cpu_slab(struct work_struct *w)
3052 {
3053 	struct kmem_cache *s;
3054 	struct kmem_cache_cpu *c;
3055 	struct slub_flush_work *sfw;
3056 
3057 	sfw = container_of(w, struct slub_flush_work, work);
3058 
3059 	s = sfw->s;
3060 	c = this_cpu_ptr(s->cpu_slab);
3061 
3062 	if (c->slab)
3063 		flush_slab(s, c);
3064 
3065 	put_partials(s);
3066 }
3067 
3068 static bool has_cpu_slab(int cpu, struct kmem_cache *s)
3069 {
3070 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3071 
3072 	return c->slab || slub_percpu_partial(c);
3073 }
3074 
3075 static DEFINE_MUTEX(flush_lock);
3076 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
3077 
3078 static void flush_all_cpus_locked(struct kmem_cache *s)
3079 {
3080 	struct slub_flush_work *sfw;
3081 	unsigned int cpu;
3082 
3083 	lockdep_assert_cpus_held();
3084 	mutex_lock(&flush_lock);
3085 
3086 	for_each_online_cpu(cpu) {
3087 		sfw = &per_cpu(slub_flush, cpu);
3088 		if (!has_cpu_slab(cpu, s)) {
3089 			sfw->skip = true;
3090 			continue;
3091 		}
3092 		INIT_WORK(&sfw->work, flush_cpu_slab);
3093 		sfw->skip = false;
3094 		sfw->s = s;
3095 		queue_work_on(cpu, flushwq, &sfw->work);
3096 	}
3097 
3098 	for_each_online_cpu(cpu) {
3099 		sfw = &per_cpu(slub_flush, cpu);
3100 		if (sfw->skip)
3101 			continue;
3102 		flush_work(&sfw->work);
3103 	}
3104 
3105 	mutex_unlock(&flush_lock);
3106 }
3107 
3108 static void flush_all(struct kmem_cache *s)
3109 {
3110 	cpus_read_lock();
3111 	flush_all_cpus_locked(s);
3112 	cpus_read_unlock();
3113 }
3114 
3115 /*
3116  * Use the cpu notifier to ensure that the cpu slabs are flushed when
3117  * necessary.
3118  */
3119 static int slub_cpu_dead(unsigned int cpu)
3120 {
3121 	struct kmem_cache *s;
3122 
3123 	mutex_lock(&slab_mutex);
3124 	list_for_each_entry(s, &slab_caches, list)
3125 		__flush_cpu_slab(s, cpu);
3126 	mutex_unlock(&slab_mutex);
3127 	return 0;
3128 }
3129 
3130 #else /* CONFIG_SLUB_TINY */
3131 static inline void flush_all_cpus_locked(struct kmem_cache *s) { }
3132 static inline void flush_all(struct kmem_cache *s) { }
3133 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
3134 static inline int slub_cpu_dead(unsigned int cpu) { return 0; }
3135 #endif /* CONFIG_SLUB_TINY */
3136 
3137 /*
3138  * Check if the objects in a per cpu structure fit numa
3139  * locality expectations.
3140  */
3141 static inline int node_match(struct slab *slab, int node)
3142 {
3143 #ifdef CONFIG_NUMA
3144 	if (node != NUMA_NO_NODE && slab_nid(slab) != node)
3145 		return 0;
3146 #endif
3147 	return 1;
3148 }
3149 
3150 #ifdef CONFIG_SLUB_DEBUG
3151 static int count_free(struct slab *slab)
3152 {
3153 	return slab->objects - slab->inuse;
3154 }
3155 
3156 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
3157 {
3158 	return atomic_long_read(&n->total_objects);
3159 }
3160 
3161 /* Supports checking bulk free of a constructed freelist */
3162 static inline bool free_debug_processing(struct kmem_cache *s,
3163 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
3164 	unsigned long addr, depot_stack_handle_t handle)
3165 {
3166 	bool checks_ok = false;
3167 	void *object = head;
3168 	int cnt = 0;
3169 
3170 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3171 		if (!check_slab(s, slab))
3172 			goto out;
3173 	}
3174 
3175 	if (slab->inuse < *bulk_cnt) {
3176 		slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
3177 			 slab->inuse, *bulk_cnt);
3178 		goto out;
3179 	}
3180 
3181 next_object:
3182 
3183 	if (++cnt > *bulk_cnt)
3184 		goto out_cnt;
3185 
3186 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3187 		if (!free_consistency_checks(s, slab, object, addr))
3188 			goto out;
3189 	}
3190 
3191 	if (s->flags & SLAB_STORE_USER)
3192 		set_track_update(s, object, TRACK_FREE, addr, handle);
3193 	trace(s, slab, object, 0);
3194 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
3195 	init_object(s, object, SLUB_RED_INACTIVE);
3196 
3197 	/* Reached end of constructed freelist yet? */
3198 	if (object != tail) {
3199 		object = get_freepointer(s, object);
3200 		goto next_object;
3201 	}
3202 	checks_ok = true;
3203 
3204 out_cnt:
3205 	if (cnt != *bulk_cnt) {
3206 		slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
3207 			 *bulk_cnt, cnt);
3208 		*bulk_cnt = cnt;
3209 	}
3210 
3211 out:
3212 
3213 	if (!checks_ok)
3214 		slab_fix(s, "Object at 0x%p not freed", object);
3215 
3216 	return checks_ok;
3217 }
3218 #endif /* CONFIG_SLUB_DEBUG */
3219 
3220 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
3221 static unsigned long count_partial(struct kmem_cache_node *n,
3222 					int (*get_count)(struct slab *))
3223 {
3224 	unsigned long flags;
3225 	unsigned long x = 0;
3226 	struct slab *slab;
3227 
3228 	spin_lock_irqsave(&n->list_lock, flags);
3229 	list_for_each_entry(slab, &n->partial, slab_list)
3230 		x += get_count(slab);
3231 	spin_unlock_irqrestore(&n->list_lock, flags);
3232 	return x;
3233 }
3234 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
3235 
3236 #ifdef CONFIG_SLUB_DEBUG
3237 static noinline void
3238 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
3239 {
3240 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
3241 				      DEFAULT_RATELIMIT_BURST);
3242 	int node;
3243 	struct kmem_cache_node *n;
3244 
3245 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
3246 		return;
3247 
3248 	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
3249 		nid, gfpflags, &gfpflags);
3250 	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
3251 		s->name, s->object_size, s->size, oo_order(s->oo),
3252 		oo_order(s->min));
3253 
3254 	if (oo_order(s->min) > get_order(s->object_size))
3255 		pr_warn("  %s debugging increased min order, use slab_debug=O to disable.\n",
3256 			s->name);
3257 
3258 	for_each_kmem_cache_node(s, node, n) {
3259 		unsigned long nr_slabs;
3260 		unsigned long nr_objs;
3261 		unsigned long nr_free;
3262 
3263 		nr_free  = count_partial(n, count_free);
3264 		nr_slabs = node_nr_slabs(n);
3265 		nr_objs  = node_nr_objs(n);
3266 
3267 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
3268 			node, nr_slabs, nr_objs, nr_free);
3269 	}
3270 }
3271 #else /* CONFIG_SLUB_DEBUG */
3272 static inline void
3273 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
3274 #endif
3275 
3276 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
3277 {
3278 	if (unlikely(slab_test_pfmemalloc(slab)))
3279 		return gfp_pfmemalloc_allowed(gfpflags);
3280 
3281 	return true;
3282 }
3283 
3284 #ifndef CONFIG_SLUB_TINY
3285 static inline bool
3286 __update_cpu_freelist_fast(struct kmem_cache *s,
3287 			   void *freelist_old, void *freelist_new,
3288 			   unsigned long tid)
3289 {
3290 	freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
3291 	freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
3292 
3293 	return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
3294 					     &old.full, new.full);
3295 }
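
/*
 * Illustration (a sketch, not from the source): the fastpath compares the
 * (freelist, tid) pair as a single double word. If this task is preempted
 * after reading c->freelist and another task allocates on the same cpu, that
 * allocation bumps the tid, so the comparison fails even if the freelist
 * pointer happens to look unchanged (ABA), and the caller retries with fresh
 * values instead of handing out a stale object.
 */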
3296 
3297 /*
3298  * Check the slab->freelist and either transfer the freelist to the
3299  * per cpu freelist or deactivate the slab.
3300  *
3301  * The slab is still frozen if the return value is not NULL.
3302  *
3303  * If this function returns NULL then the slab has been unfrozen.
3304  */
3305 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
3306 {
3307 	struct slab new;
3308 	unsigned long counters;
3309 	void *freelist;
3310 
3311 	lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3312 
3313 	do {
3314 		freelist = slab->freelist;
3315 		counters = slab->counters;
3316 
3317 		new.counters = counters;
3318 
3319 		new.inuse = slab->objects;
3320 		new.frozen = freelist != NULL;
3321 
3322 	} while (!__slab_update_freelist(s, slab,
3323 		freelist, counters,
3324 		NULL, new.counters,
3325 		"get_freelist"));
3326 
3327 	return freelist;
3328 }
3329 
3330 /*
3331  * Freeze the partial slab and return the pointer to the freelist.
3332  */
3333 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
3334 {
3335 	struct slab new;
3336 	unsigned long counters;
3337 	void *freelist;
3338 
3339 	do {
3340 		freelist = slab->freelist;
3341 		counters = slab->counters;
3342 
3343 		new.counters = counters;
3344 		VM_BUG_ON(new.frozen);
3345 
3346 		new.inuse = slab->objects;
3347 		new.frozen = 1;
3348 
3349 	} while (!slab_update_freelist(s, slab,
3350 		freelist, counters,
3351 		NULL, new.counters,
3352 		"freeze_slab"));
3353 
3354 	return freelist;
3355 }
3356 
3357 /*
3358  * Slow path. The lockless freelist is empty or we need to perform
3359  * debugging duties.
3360  *
3361  * Processing is still very fast if new objects have been freed to the
3362  * regular freelist. In that case we simply take over the regular freelist
3363  * as the lockless freelist and zap the regular freelist.
3364  *
3365  * If that is not working then we fall back to the partial lists. We take the
3366  * first element of the freelist as the object to allocate now and move the
3367  * rest of the freelist to the lockless freelist.
3368  *
3369  * And if we were unable to get a new slab from the partial slab lists then
3370  * we need to allocate a new slab. This is the slowest path since it involves
3371  * a call to the page allocator and the setup of a new slab.
3372  *
3373  * Version of __slab_alloc to use when we know that preemption is
3374  * already disabled (which is the case for bulk allocation).
3375  */
3376 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3377 			  unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3378 {
3379 	void *freelist;
3380 	struct slab *slab;
3381 	unsigned long flags;
3382 	struct partial_context pc;
3383 
3384 	stat(s, ALLOC_SLOWPATH);
3385 
3386 reread_slab:
3387 
3388 	slab = READ_ONCE(c->slab);
3389 	if (!slab) {
3390 		/*
3391 		 * if the node is not online or has no normal memory, just
3392 		 * ignore the node constraint
3393 		 */
3394 		if (unlikely(node != NUMA_NO_NODE &&
3395 			     !node_isset(node, slab_nodes)))
3396 			node = NUMA_NO_NODE;
3397 		goto new_slab;
3398 	}
3399 
3400 	if (unlikely(!node_match(slab, node))) {
3401 		/*
3402 		 * same as above but node_match() being false already
3403 		 * implies node != NUMA_NO_NODE
3404 		 */
3405 		if (!node_isset(node, slab_nodes)) {
3406 			node = NUMA_NO_NODE;
3407 		} else {
3408 			stat(s, ALLOC_NODE_MISMATCH);
3409 			goto deactivate_slab;
3410 		}
3411 	}
3412 
3413 	/*
3414 	 * By rights, we should be searching for a slab page that was
3415 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
3416 	 * information when the page leaves the per-cpu allocator
3417 	 */
3418 	if (unlikely(!pfmemalloc_match(slab, gfpflags)))
3419 		goto deactivate_slab;
3420 
3421 	/* must check again c->slab in case we got preempted and it changed */
3422 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3423 	if (unlikely(slab != c->slab)) {
3424 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3425 		goto reread_slab;
3426 	}
3427 	freelist = c->freelist;
3428 	if (freelist)
3429 		goto load_freelist;
3430 
3431 	freelist = get_freelist(s, slab);
3432 
3433 	if (!freelist) {
3434 		c->slab = NULL;
3435 		c->tid = next_tid(c->tid);
3436 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3437 		stat(s, DEACTIVATE_BYPASS);
3438 		goto new_slab;
3439 	}
3440 
3441 	stat(s, ALLOC_REFILL);
3442 
3443 load_freelist:
3444 
3445 	lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3446 
3447 	/*
3448 	 * freelist is pointing to the list of objects to be used.
3449 	 * slab is pointing to the slab from which the objects are obtained.
3450 	 * That slab must be frozen for per cpu allocations to work.
3451 	 */
3452 	VM_BUG_ON(!c->slab->frozen);
3453 	c->freelist = get_freepointer(s, freelist);
3454 	c->tid = next_tid(c->tid);
3455 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3456 	return freelist;
3457 
3458 deactivate_slab:
3459 
3460 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3461 	if (slab != c->slab) {
3462 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3463 		goto reread_slab;
3464 	}
3465 	freelist = c->freelist;
3466 	c->slab = NULL;
3467 	c->freelist = NULL;
3468 	c->tid = next_tid(c->tid);
3469 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3470 	deactivate_slab(s, slab, freelist);
3471 
3472 new_slab:
3473 
3474 #ifdef CONFIG_SLUB_CPU_PARTIAL
3475 	while (slub_percpu_partial(c)) {
3476 		local_lock_irqsave(&s->cpu_slab->lock, flags);
3477 		if (unlikely(c->slab)) {
3478 			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3479 			goto reread_slab;
3480 		}
3481 		if (unlikely(!slub_percpu_partial(c))) {
3482 			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3483 			/* we were preempted and partial list got empty */
3484 			goto new_objects;
3485 		}
3486 
3487 		slab = slub_percpu_partial(c);
3488 		slub_set_percpu_partial(c, slab);
3489 
3490 		if (likely(node_match(slab, node) &&
3491 			   pfmemalloc_match(slab, gfpflags))) {
3492 			c->slab = slab;
3493 			freelist = get_freelist(s, slab);
3494 			VM_BUG_ON(!freelist);
3495 			stat(s, CPU_PARTIAL_ALLOC);
3496 			goto load_freelist;
3497 		}
3498 
3499 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3500 
3501 		slab->next = NULL;
3502 		__put_partials(s, slab);
3503 	}
3504 #endif
3505 
3506 new_objects:
3507 
3508 	pc.flags = gfpflags;
3509 	pc.orig_size = orig_size;
3510 	slab = get_partial(s, node, &pc);
3511 	if (slab) {
3512 		if (kmem_cache_debug(s)) {
3513 			freelist = pc.object;
3514 			/*
3515 			 * For debug caches here we had to go through
3516 			 * alloc_single_from_partial() so just store the
3517 			 * tracking info and return the object.
3518 			 */
3519 			if (s->flags & SLAB_STORE_USER)
3520 				set_track(s, freelist, TRACK_ALLOC, addr);
3521 
3522 			return freelist;
3523 		}
3524 
3525 		freelist = freeze_slab(s, slab);
3526 		goto retry_load_slab;
3527 	}
3528 
3529 	slub_put_cpu_ptr(s->cpu_slab);
3530 	slab = new_slab(s, gfpflags, node);
3531 	c = slub_get_cpu_ptr(s->cpu_slab);
3532 
3533 	if (unlikely(!slab)) {
3534 		slab_out_of_memory(s, gfpflags, node);
3535 		return NULL;
3536 	}
3537 
3538 	stat(s, ALLOC_SLAB);
3539 
3540 	if (kmem_cache_debug(s)) {
3541 		freelist = alloc_single_from_new_slab(s, slab, orig_size);
3542 
3543 		if (unlikely(!freelist))
3544 			goto new_objects;
3545 
3546 		if (s->flags & SLAB_STORE_USER)
3547 			set_track(s, freelist, TRACK_ALLOC, addr);
3548 
3549 		return freelist;
3550 	}
3551 
3552 	/*
3553 	 * No other reference to the slab yet so we can
3554 	 * muck around with it freely without cmpxchg
3555 	 */
3556 	freelist = slab->freelist;
3557 	slab->freelist = NULL;
3558 	slab->inuse = slab->objects;
3559 	slab->frozen = 1;
3560 
3561 	inc_slabs_node(s, slab_nid(slab), slab->objects);
3562 
3563 	if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
3564 		/*
3565 		 * For !pfmemalloc_match() case we don't load freelist so that
3566 		 * we don't make further mismatched allocations easier.
3567 		 */
3568 		deactivate_slab(s, slab, get_freepointer(s, freelist));
3569 		return freelist;
3570 	}
3571 
3572 retry_load_slab:
3573 
3574 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3575 	if (unlikely(c->slab)) {
3576 		void *flush_freelist = c->freelist;
3577 		struct slab *flush_slab = c->slab;
3578 
3579 		c->slab = NULL;
3580 		c->freelist = NULL;
3581 		c->tid = next_tid(c->tid);
3582 
3583 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3584 
3585 		deactivate_slab(s, flush_slab, flush_freelist);
3586 
3587 		stat(s, CPUSLAB_FLUSH);
3588 
3589 		goto retry_load_slab;
3590 	}
3591 	c->slab = slab;
3592 
3593 	goto load_freelist;
3594 }
3595 
3596 /*
3597  * A wrapper for ___slab_alloc() for contexts where preemption is not yet
3598  * disabled. Compensates for possible cpu changes by refetching the per cpu area
3599  * pointer.
3600  */
3601 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3602 			  unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3603 {
3604 	void *p;
3605 
3606 #ifdef CONFIG_PREEMPT_COUNT
3607 	/*
3608 	 * We may have been preempted and rescheduled on a different
3609 	 * cpu before disabling preemption. Need to reload cpu area
3610 	 * pointer.
3611 	 */
3612 	c = slub_get_cpu_ptr(s->cpu_slab);
3613 #endif
3614 
3615 	p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
3616 #ifdef CONFIG_PREEMPT_COUNT
3617 	slub_put_cpu_ptr(s->cpu_slab);
3618 #endif
3619 	return p;
3620 }
3621 
3622 static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
3623 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3624 {
3625 	struct kmem_cache_cpu *c;
3626 	struct slab *slab;
3627 	unsigned long tid;
3628 	void *object;
3629 
3630 redo:
3631 	/*
3632 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
3633 	 * enabled. We may switch back and forth between cpus while
3634 	 * reading from one cpu area. That does not matter as long
3635 	 * as we end up on the original cpu again when doing the cmpxchg.
3636 	 *
3637 	 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
3638 	 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
3639 	 * the tid. If we are preempted and switched to another cpu between the
3640 	 * two reads, it's OK as the two are still associated with the same cpu
3641 	 * and cmpxchg later will validate the cpu.
3642 	 */
3643 	c = raw_cpu_ptr(s->cpu_slab);
3644 	tid = READ_ONCE(c->tid);
3645 
3646 	/*
3647 	 * The irqless object alloc/free algorithm used here depends on the
3648 	 * sequence of fetching cpu_slab's data. tid should be fetched before
3649 	 * anything else on c to guarantee that the object and slab associated
3650 	 * with the previous tid won't be used with the current tid. If we fetch
3651 	 * tid first, the object and slab could be the ones associated with the
3652 	 * next tid; the request then simply fails and is retried. So, no problem.
3653 	 */
3654 	barrier();
3655 
3656 	/*
3657 	 * The transaction ids are globally unique per cpu and per operation on
3658 	 * a per cpu queue. Thus they can be used to guarantee that the cmpxchg_double
3659 	 * occurs on the right processor and that there was no operation on the
3660 	 * linked list in between.
3661 	 */
3662 
3663 	object = c->freelist;
3664 	slab = c->slab;
3665 
3666 	if (!USE_LOCKLESS_FAST_PATH() ||
3667 	    unlikely(!object || !slab || !node_match(slab, node))) {
3668 		object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
3669 	} else {
3670 		void *next_object = get_freepointer_safe(s, object);
3671 
3672 		/*
3673 		 * The cmpxchg will only match if there was no additional
3674 		 * operation and if we are on the right processor.
3675 		 *
3676 		 * The cmpxchg does the following atomically (without lock
3677 		 * semantics!)
3678 		 * 1. Relocate first pointer to the current per cpu area.
3679 		 * 2. Verify that tid and freelist have not been changed
3680 		 * 3. If they were not changed replace tid and freelist
3681 		 *
3682 		 * Since this is without lock semantics the protection is only
3683 		 * against code executing on this cpu *not* from access by
3684 		 * other cpus.
3685 		 */
3686 		if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) {
3687 			note_cmpxchg_failure("slab_alloc", s, tid);
3688 			goto redo;
3689 		}
3690 		prefetch_freepointer(s, next_object);
3691 		stat(s, ALLOC_FASTPATH);
3692 	}
3693 
3694 	return object;
3695 }
3696 #else /* CONFIG_SLUB_TINY */
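/*
 * With CONFIG_SLUB_TINY there are no per-cpu slabs: allocate directly from a
 * node partial slab, or from a freshly allocated slab if none is available.
 */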
3697 static void *__slab_alloc_node(struct kmem_cache *s,
3698 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3699 {
3700 	struct partial_context pc;
3701 	struct slab *slab;
3702 	void *object;
3703 
3704 	pc.flags = gfpflags;
3705 	pc.orig_size = orig_size;
3706 	slab = get_partial(s, node, &pc);
3707 
3708 	if (slab)
3709 		return pc.object;
3710 
3711 	slab = new_slab(s, gfpflags, node);
3712 	if (unlikely(!slab)) {
3713 		slab_out_of_memory(s, gfpflags, node);
3714 		return NULL;
3715 	}
3716 
3717 	object = alloc_single_from_new_slab(s, slab, orig_size);
3718 
3719 	return object;
3720 }
3721 #endif /* CONFIG_SLUB_TINY */
3722 
3723 /*
3724  * If the object has been wiped upon free, make sure it's fully initialized by
3725  * zeroing out the freelist pointer.
3726  */
3727 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
3728 						   void *obj)
3729 {
3730 	if (unlikely(slab_want_init_on_free(s)) && obj &&
3731 	    !freeptr_outside_object(s))
3732 		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
3733 			0, sizeof(void *));
3734 }
3735 
3736 noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
3737 {
3738 	if (__should_failslab(s, gfpflags))
3739 		return -ENOMEM;
3740 	return 0;
3741 }
3742 ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
3743 
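/*
 * Common pre-allocation hook: apply gfp_allowed_mask, honour fault injection
 * via should_failslab() and charge the allocation to the memcg. Returns the
 * cache to allocate from, or NULL if the allocation should not proceed.
 */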
3744 static __fastpath_inline
3745 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
3746 				       struct list_lru *lru,
3747 				       struct obj_cgroup **objcgp,
3748 				       size_t size, gfp_t flags)
3749 {
3750 	flags &= gfp_allowed_mask;
3751 
3752 	might_alloc(flags);
3753 
3754 	if (unlikely(should_failslab(s, flags)))
3755 		return NULL;
3756 
3757 	if (unlikely(!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags)))
3758 		return NULL;
3759 
3760 	return s;
3761 }
3762 
3763 static __fastpath_inline
3764 void slab_post_alloc_hook(struct kmem_cache *s,	struct obj_cgroup *objcg,
3765 			  gfp_t flags, size_t size, void **p, bool init,
3766 			  unsigned int orig_size)
3767 {
3768 	unsigned int zero_size = s->object_size;
3769 	bool kasan_init = init;
3770 	size_t i;
3771 	gfp_t init_flags = flags & gfp_allowed_mask;
3772 
3773 	/*
3774 	 * For a kmalloc object, the allocated memory size (object_size) is
3775 	 * likely larger than the requested size (orig_size). If redzone checking
3776 	 * is enabled for the extra space, don't zero it, as it will be redzoned
3777 	 * soon. The redzone operation for this extra space can be seen as a
3778 	 * replacement for the current poisoning under certain debug options, and
3779 	 * won't break other sanity checks.
3780 	 */
3781 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
3782 	    (s->flags & SLAB_KMALLOC))
3783 		zero_size = orig_size;
3784 
3785 	/*
3786 	 * When slab_debug is enabled, avoid memory initialization integrated
3787 	 * into KASAN and instead zero out the memory via the memset below with
3788 	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
3789 	 * cause false-positive reports. This does not lead to a performance
3790 	 * penalty on production builds, as slab_debug is not intended to be
3791 	 * enabled there.
3792 	 */
3793 	if (__slub_debug_enabled())
3794 		kasan_init = false;
3795 
3796 	/*
3797 	 * As memory initialization might be integrated into KASAN,
3798 	 * kasan_slab_alloc and initialization memset must be
3799 	 * kept together to avoid discrepancies in behavior.
3800 	 *
3801 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
3802 	 */
3803 	for (i = 0; i < size; i++) {
3804 		p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
3805 		if (p[i] && init && (!kasan_init ||
3806 				     !kasan_has_integrated_init()))
3807 			memset(p[i], 0, zero_size);
3808 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
3809 					 s->flags, init_flags);
3810 		kmsan_slab_alloc(s, p[i], init_flags);
3811 	}
3812 
3813 	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
3814 }
3815 
3816 /*
3817  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
3818  * have the fastpath folded into their functions. So no function call
3819  * overhead for requests that can be satisfied on the fastpath.
3820  *
3821  * The fastpath works by first checking if the lockless freelist can be used.
3822  * If not then __slab_alloc is called for slow processing.
3823  *
3824  * Otherwise we can simply pick the next object from the lockless free list.
3825  */
3826 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
3827 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3828 {
3829 	void *object;
3830 	struct obj_cgroup *objcg = NULL;
3831 	bool init = false;
3832 
3833 	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
3834 	if (unlikely(!s))
3835 		return NULL;
3836 
3837 	object = kfence_alloc(s, orig_size, gfpflags);
3838 	if (unlikely(object))
3839 		goto out;
3840 
3841 	object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
3842 
3843 	maybe_wipe_obj_freeptr(s, object);
3844 	init = slab_want_init_on_alloc(gfpflags, s);
3845 
3846 out:
3847 	/*
3848 	 * When init equals 'true', as for the kzalloc() family, only
3849 	 * @orig_size bytes might be zeroed instead of s->object_size.
3850 	 */
3851 	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
3852 
3853 	return object;
3854 }
3855 
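/**
 * kmem_cache_alloc - Allocate an object from the given cache
 * @s: The cache to allocate from.
 * @gfpflags: See kmalloc().
 *
 * Return: pointer to the new object or %NULL in case of error
 */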
3856 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
3857 {
3858 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
3859 				    s->object_size);
3860 
3861 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
3862 
3863 	return ret;
3864 }
3865 EXPORT_SYMBOL(kmem_cache_alloc);
3866 
3867 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
3868 			   gfp_t gfpflags)
3869 {
3870 	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
3871 				    s->object_size);
3872 
3873 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
3874 
3875 	return ret;
3876 }
3877 EXPORT_SYMBOL(kmem_cache_alloc_lru);
3878 
3879 /**
3880  * kmem_cache_alloc_node - Allocate an object on the specified node
3881  * @s: The cache to allocate from.
3882  * @gfpflags: See kmalloc().
3883  * @node: node number of the target node.
3884  *
3885  * Identical to kmem_cache_alloc but it will allocate memory on the given
3886  * node, which can improve the performance for cpu bound structures.
3887  *
3888  * Fallback to other node is possible if __GFP_THISNODE is not set.
3889  *
3890  * Return: pointer to the new object or %NULL in case of error
3891  */
3892 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
3893 {
3894 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
3895 
3896 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
3897 
3898 	return ret;
3899 }
3900 EXPORT_SYMBOL(kmem_cache_alloc_node);
3901 
3902 /*
3903  * To avoid unnecessary overhead, we pass through large allocation requests
3904  * directly to the page allocator. We use __GFP_COMP, because we will need to
3905  * know the allocation order to free the pages properly in kfree.
3906  */
3907 static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
3908 {
3909 	struct folio *folio;
3910 	void *ptr = NULL;
3911 	unsigned int order = get_order(size);
3912 
3913 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
3914 		flags = kmalloc_fix_flags(flags);
3915 
3916 	flags |= __GFP_COMP;
3917 	folio = (struct folio *)alloc_pages_node(node, flags, order);
3918 	if (folio) {
3919 		ptr = folio_address(folio);
3920 		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
3921 				      PAGE_SIZE << order);
3922 	}
3923 
3924 	ptr = kasan_kmalloc_large(ptr, size, flags);
3925 	/* As ptr might get tagged, call kmemleak hook after KASAN. */
3926 	kmemleak_alloc(ptr, size, 1, flags);
3927 	kmsan_kmalloc_large(ptr, size, flags);
3928 
3929 	return ptr;
3930 }
3931 
3932 void *kmalloc_large(size_t size, gfp_t flags)
3933 {
3934 	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
3935 
3936 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
3937 		      flags, NUMA_NO_NODE);
3938 	return ret;
3939 }
3940 EXPORT_SYMBOL(kmalloc_large);
3941 
3942 void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3943 {
3944 	void *ret = __kmalloc_large_node(size, flags, node);
3945 
3946 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
3947 		      flags, node);
3948 	return ret;
3949 }
3950 EXPORT_SYMBOL(kmalloc_large_node);
3951 
3952 static __always_inline
3953 void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
3954 			unsigned long caller)
3955 {
3956 	struct kmem_cache *s;
3957 	void *ret;
3958 
3959 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3960 		ret = __kmalloc_large_node(size, flags, node);
3961 		trace_kmalloc(caller, ret, size,
3962 			      PAGE_SIZE << get_order(size), flags, node);
3963 		return ret;
3964 	}
3965 
3966 	if (unlikely(!size))
3967 		return ZERO_SIZE_PTR;
3968 
3969 	s = kmalloc_slab(size, flags, caller);
3970 
3971 	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
3972 	ret = kasan_kmalloc(s, ret, size, flags);
3973 	trace_kmalloc(caller, ret, size, s->size, flags, node);
3974 	return ret;
3975 }
3976 
3977 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3978 {
3979 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3980 }
3981 EXPORT_SYMBOL(__kmalloc_node);
3982 
3983 void *__kmalloc(size_t size, gfp_t flags)
3984 {
3985 	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
3986 }
3987 EXPORT_SYMBOL(__kmalloc);
3988 
3989 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3990 				  int node, unsigned long caller)
3991 {
3992 	return __do_kmalloc_node(size, flags, node, caller);
3993 }
3994 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3995 
3996 void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
3997 {
3998 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
3999 					    _RET_IP_, size);
4000 
4001 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
4002 
4003 	ret = kasan_kmalloc(s, ret, size, gfpflags);
4004 	return ret;
4005 }
4006 EXPORT_SYMBOL(kmalloc_trace);
4007 
4008 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
4009 			 int node, size_t size)
4010 {
4011 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
4012 
4013 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
4014 
4015 	ret = kasan_kmalloc(s, ret, size, gfpflags);
4016 	return ret;
4017 }
4018 EXPORT_SYMBOL(kmalloc_node_trace);
4019 
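/*
 * Free the detached freelist head..tail (bulk_cnt objects) straight back to
 * the slab under n->list_lock, bypassing the per-cpu machinery. Used when
 * slab debugging is enabled or with CONFIG_SLUB_TINY.
 */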
4020 static noinline void free_to_partial_list(
4021 	struct kmem_cache *s, struct slab *slab,
4022 	void *head, void *tail, int bulk_cnt,
4023 	unsigned long addr)
4024 {
4025 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
4026 	struct slab *slab_free = NULL;
4027 	int cnt = bulk_cnt;
4028 	unsigned long flags;
4029 	depot_stack_handle_t handle = 0;
4030 
4031 	if (s->flags & SLAB_STORE_USER)
4032 		handle = set_track_prepare();
4033 
4034 	spin_lock_irqsave(&n->list_lock, flags);
4035 
4036 	if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
4037 		void *prior = slab->freelist;
4038 
4039 		/* Perform the actual freeing while we still hold the locks */
4040 		slab->inuse -= cnt;
4041 		set_freepointer(s, tail, prior);
4042 		slab->freelist = head;
4043 
4044 		/*
4045 		 * If the slab is empty, and the node's partial list is full,
4046 		 * it should be discarded regardless of whether it is on the
4047 		 * full or the partial list.
4048 		 */
4049 		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
4050 			slab_free = slab;
4051 
4052 		if (!prior) {
4053 			/* was on full list */
4054 			remove_full(s, n, slab);
4055 			if (!slab_free) {
4056 				add_partial(n, slab, DEACTIVATE_TO_TAIL);
4057 				stat(s, FREE_ADD_PARTIAL);
4058 			}
4059 		} else if (slab_free) {
4060 			remove_partial(n, slab);
4061 			stat(s, FREE_REMOVE_PARTIAL);
4062 		}
4063 	}
4064 
4065 	if (slab_free) {
4066 		/*
4067 		 * Update the counters while still holding n->list_lock to
4068 		 * prevent spurious validation warnings
4069 		 */
4070 		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
4071 	}
4072 
4073 	spin_unlock_irqrestore(&n->list_lock, flags);
4074 
4075 	if (slab_free) {
4076 		stat(s, FREE_SLAB);
4077 		free_slab(s, slab_free);
4078 	}
4079 }
4080 
4081 /*
4082  * Slow path handling. This may still be called frequently since objects
4083  * have a longer lifetime than the cpu slabs in most processing loads.
4084  *
4085  * So we still attempt to reduce cache line usage. Just take the slab
4086  * lock and free the item. If there is no additional partial slab
4087  * handling required then we can return immediately.
4088  */
4089 static void __slab_free(struct kmem_cache *s, struct slab *slab,
4090 			void *head, void *tail, int cnt,
4091 			unsigned long addr)
4092 
4093 {
4094 	void *prior;
4095 	int was_frozen;
4096 	struct slab new;
4097 	unsigned long counters;
4098 	struct kmem_cache_node *n = NULL;
4099 	unsigned long flags;
4100 	bool on_node_partial;
4101 
4102 	stat(s, FREE_SLOWPATH);
4103 
4104 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4105 		free_to_partial_list(s, slab, head, tail, cnt, addr);
4106 		return;
4107 	}
4108 
4109 	do {
4110 		if (unlikely(n)) {
4111 			spin_unlock_irqrestore(&n->list_lock, flags);
4112 			n = NULL;
4113 		}
4114 		prior = slab->freelist;
4115 		counters = slab->counters;
4116 		set_freepointer(s, tail, prior);
4117 		new.counters = counters;
4118 		was_frozen = new.frozen;
4119 		new.inuse -= cnt;
4120 		if ((!new.inuse || !prior) && !was_frozen) {
4121 			/* Needs to be taken off a list */
4122 			if (!kmem_cache_has_cpu_partial(s) || prior) {
4123 
4124 				n = get_node(s, slab_nid(slab));
4125 				/*
4126 				 * Speculatively acquire the list_lock.
4127 				 * If the cmpxchg does not succeed then we may
4128 				 * drop the list_lock without any processing.
4129 				 *
4130 				 * Otherwise the list_lock will synchronize with
4131 				 * other processors updating the list of slabs.
4132 				 */
4133 				spin_lock_irqsave(&n->list_lock, flags);
4134 
4135 				on_node_partial = slab_test_node_partial(slab);
4136 			}
4137 		}
4138 
4139 	} while (!slab_update_freelist(s, slab,
4140 		prior, counters,
4141 		head, new.counters,
4142 		"__slab_free"));
4143 
4144 	if (likely(!n)) {
4145 
4146 		if (likely(was_frozen)) {
4147 			/*
4148 			 * The list lock was not taken therefore no list
4149 			 * activity can be necessary.
4150 			 */
4151 			stat(s, FREE_FROZEN);
4152 		} else if (kmem_cache_has_cpu_partial(s) && !prior) {
4153 			/*
4154 			 * If we started with a full slab then put it onto the
4155 			 * per cpu partial list.
4156 			 */
4157 			put_cpu_partial(s, slab, 1);
4158 			stat(s, CPU_PARTIAL_FREE);
4159 		}
4160 
4161 		return;
4162 	}
4163 
4164 	/*
4165 	 * This slab was partially empty but not on the per-node partial list,
4166 	 * in which case we shouldn't manipulate its list, just return.
4167 	 */
4168 	if (prior && !on_node_partial) {
4169 		spin_unlock_irqrestore(&n->list_lock, flags);
4170 		return;
4171 	}
4172 
4173 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
4174 		goto slab_empty;
4175 
4176 	/*
4177 	 * Objects left in the slab. If it was not on the partial list before
4178 	 * then add it.
4179 	 */
4180 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
4181 		add_partial(n, slab, DEACTIVATE_TO_TAIL);
4182 		stat(s, FREE_ADD_PARTIAL);
4183 	}
4184 	spin_unlock_irqrestore(&n->list_lock, flags);
4185 	return;
4186 
4187 slab_empty:
4188 	if (prior) {
4189 		/*
4190 		 * Slab on the partial list.
4191 		 */
4192 		remove_partial(n, slab);
4193 		stat(s, FREE_REMOVE_PARTIAL);
4194 	}
4195 
4196 	spin_unlock_irqrestore(&n->list_lock, flags);
4197 	stat(s, FREE_SLAB);
4198 	discard_slab(s, slab);
4199 }
4200 
4201 #ifndef CONFIG_SLUB_TINY
4202 /*
4203  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
4204  * can perform fastpath freeing without additional function calls.
4205  *
4206  * The fastpath is only possible if we are freeing to the current cpu slab
4207  * of this processor. This is typically the case if we have just allocated
4208  * the item beforehand.
4209  *
4210  * If the fastpath is not possible then we fall back to __slab_free where we
4211  * deal with all sorts of special processing.
4212  *
4213  * Bulk free of a freelist with several objects (all pointing to the
4214  * same slab) is possible by specifying head and tail pointers, plus an
4215  * object count (cnt). Bulk free is indicated by the tail pointer being set.
4216  */
4217 static __always_inline void do_slab_free(struct kmem_cache *s,
4218 				struct slab *slab, void *head, void *tail,
4219 				int cnt, unsigned long addr)
4220 {
4221 	struct kmem_cache_cpu *c;
4222 	unsigned long tid;
4223 	void **freelist;
4224 
4225 redo:
4226 	/*
4227 	 * Determine the current cpu's per cpu slab.
4228 	 * The cpu may change afterward. However that does not matter since
4229 	 * data is retrieved via this pointer. If we are on the same cpu
4230 	 * during the cmpxchg then the free will succeed.
4231 	 */
4232 	c = raw_cpu_ptr(s->cpu_slab);
4233 	tid = READ_ONCE(c->tid);
4234 
4235 	/* Same with comment on barrier() in slab_alloc_node() */
4236 	barrier();
4237 
4238 	if (unlikely(slab != c->slab)) {
4239 		__slab_free(s, slab, head, tail, cnt, addr);
4240 		return;
4241 	}
4242 
4243 	if (USE_LOCKLESS_FAST_PATH()) {
4244 		freelist = READ_ONCE(c->freelist);
4245 
4246 		set_freepointer(s, tail, freelist);
4247 
4248 		if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
4249 			note_cmpxchg_failure("slab_free", s, tid);
4250 			goto redo;
4251 		}
4252 	} else {
4253 		/* Update the free list under the local lock */
4254 		local_lock(&s->cpu_slab->lock);
4255 		c = this_cpu_ptr(s->cpu_slab);
4256 		if (unlikely(slab != c->slab)) {
4257 			local_unlock(&s->cpu_slab->lock);
4258 			goto redo;
4259 		}
4260 		tid = c->tid;
4261 		freelist = c->freelist;
4262 
4263 		set_freepointer(s, tail, freelist);
4264 		c->freelist = head;
4265 		c->tid = next_tid(tid);
4266 
4267 		local_unlock(&s->cpu_slab->lock);
4268 	}
4269 	stat_add(s, FREE_FASTPATH, cnt);
4270 }
4271 #else /* CONFIG_SLUB_TINY */
4272 static void do_slab_free(struct kmem_cache *s,
4273 				struct slab *slab, void *head, void *tail,
4274 				int cnt, unsigned long addr)
4275 {
4276 	__slab_free(s, slab, head, tail, cnt, addr);
4277 }
4278 #endif /* CONFIG_SLUB_TINY */
4279 
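/*
 * Free a single object: run the memcg and free hooks and, unless the hooks
 * retain the object (e.g. deferred reuse by KASAN), hand it to do_slab_free().
 */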
4280 static __fastpath_inline
4281 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
4282 	       unsigned long addr)
4283 {
4284 	memcg_slab_free_hook(s, slab, &object, 1);
4285 
4286 	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
4287 		do_slab_free(s, slab, object, object, 1, addr);
4288 }
4289 
4290 static __fastpath_inline
4291 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
4292 		    void *tail, void **p, int cnt, unsigned long addr)
4293 {
4294 	memcg_slab_free_hook(s, slab, p, cnt);
4295 	/*
4296 	 * With KASAN enabled, slab_free_freelist_hook() modifies the freelist
4297 	 * to remove objects whose reuse must be delayed.
4298 	 */
4299 	if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt)))
4300 		do_slab_free(s, slab, head, tail, cnt, addr);
4301 }
4302 
4303 #ifdef CONFIG_KASAN_GENERIC
4304 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
4305 {
4306 	do_slab_free(cache, virt_to_slab(x), x, x, 1, addr);
4307 }
4308 #endif
4309 
4310 static inline struct kmem_cache *virt_to_cache(const void *obj)
4311 {
4312 	struct slab *slab;
4313 
4314 	slab = virt_to_slab(obj);
4315 	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
4316 		return NULL;
4317 	return slab->slab_cache;
4318 }
4319 
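/*
 * When freelist hardening or consistency checks are enabled, verify that the
 * object really belongs to the cache it is being freed to; warn and fall back
 * to the cache derived from the object otherwise.
 */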
4320 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
4321 {
4322 	struct kmem_cache *cachep;
4323 
4324 	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
4325 	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
4326 		return s;
4327 
4328 	cachep = virt_to_cache(x);
4329 	if (WARN(cachep && cachep != s,
4330 		 "%s: Wrong slab cache. %s but object is from %s\n",
4331 		 __func__, s->name, cachep->name))
4332 		print_tracking(cachep, x);
4333 	return cachep;
4334 }
4335 
4336 /**
4337  * kmem_cache_free - Deallocate an object
4338  * @s: The cache the allocation was from.
4339  * @x: The previously allocated object.
4340  *
4341  * Free an object which was previously allocated from this
4342  * cache.
4343  */
4344 void kmem_cache_free(struct kmem_cache *s, void *x)
4345 {
4346 	s = cache_from_obj(s, x);
4347 	if (!s)
4348 		return;
4349 	trace_kmem_cache_free(_RET_IP_, x, s);
4350 	slab_free(s, virt_to_slab(x), x, _RET_IP_);
4351 }
4352 EXPORT_SYMBOL(kmem_cache_free);
4353 
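/*
 * Free a kmalloc allocation that bypassed the slab allocator and was obtained
 * directly from the page allocator as a high-order compound folio.
 */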
4354 static void free_large_kmalloc(struct folio *folio, void *object)
4355 {
4356 	unsigned int order = folio_order(folio);
4357 
4358 	if (WARN_ON_ONCE(order == 0))
4359 		pr_warn_once("object pointer: 0x%p\n", object);
4360 
4361 	kmemleak_free(object);
4362 	kasan_kfree_large(object);
4363 	kmsan_kfree_large(object);
4364 
4365 	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4366 			      -(PAGE_SIZE << order));
4367 	folio_put(folio);
4368 }
4369 
4370 /**
4371  * kfree - free previously allocated memory
4372  * @object: pointer returned by kmalloc() or kmem_cache_alloc()
4373  *
4374  * If @object is NULL, no operation is performed.
4375  */
4376 void kfree(const void *object)
4377 {
4378 	struct folio *folio;
4379 	struct slab *slab;
4380 	struct kmem_cache *s;
4381 	void *x = (void *)object;
4382 
4383 	trace_kfree(_RET_IP_, object);
4384 
4385 	if (unlikely(ZERO_OR_NULL_PTR(object)))
4386 		return;
4387 
4388 	folio = virt_to_folio(object);
4389 	if (unlikely(!folio_test_slab(folio))) {
4390 		free_large_kmalloc(folio, (void *)object);
4391 		return;
4392 	}
4393 
4394 	slab = folio_slab(folio);
4395 	s = slab->slab_cache;
4396 	slab_free(s, slab, x, _RET_IP_);
4397 }
4398 EXPORT_SYMBOL(kfree);
4399 
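/*
 * A freelist detached from an array of objects during bulk free: @slab is the
 * common slab, @freelist and @tail delimit the linked objects, @cnt is their
 * number and @s is the cache they belong to.
 */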
4400 struct detached_freelist {
4401 	struct slab *slab;
4402 	void *tail;
4403 	void *freelist;
4404 	int cnt;
4405 	struct kmem_cache *s;
4406 };
4407 
4408 /*
4409  * This function progressively scans the array of free objects (with
4410  * a limited look ahead) and extracts objects belonging to the same
4411  * slab.  It builds a detached freelist directly within the given
4412  * slab/objects.  This can happen without any need for
4413  * synchronization, because the objects are owned by the running process.
4414  * The freelist is built up as a singly linked list in the objects.
4415  * The idea is that this detached freelist can then be bulk
4416  * transferred to the real freelist(s), requiring only a single
4417  * synchronization primitive.  Look ahead in the array is limited for
4418  * performance reasons.
4419  */
4420 static inline
4421 int build_detached_freelist(struct kmem_cache *s, size_t size,
4422 			    void **p, struct detached_freelist *df)
4423 {
4424 	int lookahead = 3;
4425 	void *object;
4426 	struct folio *folio;
4427 	size_t same;
4428 
4429 	object = p[--size];
4430 	folio = virt_to_folio(object);
4431 	if (!s) {
4432 		/* Handle kmalloc'ed objects */
4433 		if (unlikely(!folio_test_slab(folio))) {
4434 			free_large_kmalloc(folio, object);
4435 			df->slab = NULL;
4436 			return size;
4437 		}
4438 		/* Derive kmem_cache from object */
4439 		df->slab = folio_slab(folio);
4440 		df->s = df->slab->slab_cache;
4441 	} else {
4442 		df->slab = folio_slab(folio);
4443 		df->s = cache_from_obj(s, object); /* Support for memcg */
4444 	}
4445 
4446 	/* Start new detached freelist */
4447 	df->tail = object;
4448 	df->freelist = object;
4449 	df->cnt = 1;
4450 
4451 	if (is_kfence_address(object))
4452 		return size;
4453 
4454 	set_freepointer(df->s, object, NULL);
4455 
4456 	same = size;
4457 	while (size) {
4458 		object = p[--size];
4459 		/* df->slab is always set at this point */
4460 		if (df->slab == virt_to_slab(object)) {
4461 			/* Opportunistically build the freelist */
4462 			set_freepointer(df->s, object, df->freelist);
4463 			df->freelist = object;
4464 			df->cnt++;
4465 			same--;
4466 			if (size != same)
4467 				swap(p[size], p[same]);
4468 			continue;
4469 		}
4470 
4471 		/* Limit look ahead search */
4472 		if (!--lookahead)
4473 			break;
4474 	}
4475 
4476 	return same;
4477 }
4478 
4479 /*
4480  * Internal bulk free of objects that were not initialised by the post alloc
4481  * hooks and thus should not be processed by the free hooks.
4482  */
4483 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
4484 {
4485 	if (!size)
4486 		return;
4487 
4488 	do {
4489 		struct detached_freelist df;
4490 
4491 		size = build_detached_freelist(s, size, p, &df);
4492 		if (!df.slab)
4493 			continue;
4494 
4495 		do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
4496 			     _RET_IP_);
4497 	} while (likely(size));
4498 }
4499 
4500 /* Note that interrupts must be enabled when calling this function. */
4501 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
4502 {
4503 	if (!size)
4504 		return;
4505 
4506 	do {
4507 		struct detached_freelist df;
4508 
4509 		size = build_detached_freelist(s, size, p, &df);
4510 		if (!df.slab)
4511 			continue;
4512 
4513 		slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
4514 			       df.cnt, _RET_IP_);
4515 	} while (likely(size));
4516 }
4517 EXPORT_SYMBOL(kmem_cache_free_bulk);
4518 
4519 #ifndef CONFIG_SLUB_TINY
4520 static inline
4521 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
4522 			    void **p)
4523 {
4524 	struct kmem_cache_cpu *c;
4525 	unsigned long irqflags;
4526 	int i;
4527 
4528 	/*
4529 	 * Drain objects in the per cpu slab, while disabling local
4530 	 * IRQs, which protects against PREEMPT and interrupt
4531 	 * handlers invoking the normal fastpath.
4532 	 */
4533 	c = slub_get_cpu_ptr(s->cpu_slab);
4534 	local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4535 
4536 	for (i = 0; i < size; i++) {
4537 		void *object = kfence_alloc(s, s->object_size, flags);
4538 
4539 		if (unlikely(object)) {
4540 			p[i] = object;
4541 			continue;
4542 		}
4543 
4544 		object = c->freelist;
4545 		if (unlikely(!object)) {
4546 			/*
4547 			 * We may have removed an object from c->freelist using
4548 			 * the fastpath in the previous iteration; in that case,
4549 			 * c->tid has not been bumped yet.
4550 			 * Since ___slab_alloc() may reenable interrupts while
4551 			 * allocating memory, we should bump c->tid now.
4552 			 */
4553 			c->tid = next_tid(c->tid);
4554 
4555 			local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4556 
4557 			/*
4558 			 * Invoking the slow path likely has the side effect
4559 			 * of re-populating the per CPU c->freelist.
4560 			 */
4561 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
4562 					    _RET_IP_, c, s->object_size);
4563 			if (unlikely(!p[i]))
4564 				goto error;
4565 
4566 			c = this_cpu_ptr(s->cpu_slab);
4567 			maybe_wipe_obj_freeptr(s, p[i]);
4568 
4569 			local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4570 
4571 			continue; /* goto for-loop */
4572 		}
4573 		c->freelist = get_freepointer(s, object);
4574 		p[i] = object;
4575 		maybe_wipe_obj_freeptr(s, p[i]);
4576 		stat(s, ALLOC_FASTPATH);
4577 	}
4578 	c->tid = next_tid(c->tid);
4579 	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4580 	slub_put_cpu_ptr(s->cpu_slab);
4581 
4582 	return i;
4583 
4584 error:
4585 	slub_put_cpu_ptr(s->cpu_slab);
4586 	__kmem_cache_free_bulk(s, i, p);
4587 	return 0;
4588 
4589 }
4590 #else /* CONFIG_SLUB_TINY */
4591 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
4592 				   size_t size, void **p)
4593 {
4594 	int i;
4595 
4596 	for (i = 0; i < size; i++) {
4597 		void *object = kfence_alloc(s, s->object_size, flags);
4598 
4599 		if (unlikely(object)) {
4600 			p[i] = object;
4601 			continue;
4602 		}
4603 
4604 		p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
4605 					 _RET_IP_, s->object_size);
4606 		if (unlikely(!p[i]))
4607 			goto error;
4608 
4609 		maybe_wipe_obj_freeptr(s, p[i]);
4610 	}
4611 
4612 	return i;
4613 
4614 error:
4615 	__kmem_cache_free_bulk(s, i, p);
4616 	return 0;
4617 }
4618 #endif /* CONFIG_SLUB_TINY */
4619 
4620 /* Note that interrupts must be enabled when calling this function. */
4621 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
4622 			  void **p)
4623 {
4624 	int i;
4625 	struct obj_cgroup *objcg = NULL;
4626 
4627 	if (!size)
4628 		return 0;
4629 
4630 	/* memcg and kmem_cache debug support */
4631 	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
4632 	if (unlikely(!s))
4633 		return 0;
4634 
4635 	i = __kmem_cache_alloc_bulk(s, flags, size, p);
4636 
4637 	/*
4638 	 * memcg and kmem_cache debug support and memory initialization.
4639 	 * Done outside of the IRQ disabled fastpath loop.
4640 	 */
4641 	if (likely(i != 0)) {
4642 		slab_post_alloc_hook(s, objcg, flags, size, p,
4643 			slab_want_init_on_alloc(flags, s), s->object_size);
4644 	} else {
4645 		memcg_slab_alloc_error_hook(s, size, objcg);
4646 	}
4647 
4648 	return i;
4649 }
4650 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
4651 
4652 
4653 /*
4654  * Object placement in a slab is made very easy because we always start at
4655  * offset 0. If we tune the size of the object to the alignment then we can
4656  * get the required alignment by putting one properly sized object after
4657  * another.
4658  *
4659  * Notice that the allocation order determines the sizes of the per cpu
4660  * caches. Each processor always has one slab available for allocations.
4661  * Increasing the allocation order reduces the number of times that slabs
4662  * must be moved on and off the partial lists and is therefore a factor in
4663  * locking overhead.
4664  */
4665 
4666 /*
4667  * Minimum / Maximum order of slab pages. This influences locking overhead
4668  * and slab fragmentation. A higher order reduces the number of partial slabs
4669  * and increases the number of allocations possible without having to
4670  * take the list_lock.
4671  */
4672 static unsigned int slub_min_order;
4673 static unsigned int slub_max_order =
4674 	IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
4675 static unsigned int slub_min_objects;
4676 
4677 /*
4678  * Calculate the order of allocation given a slab object size.
4679  *
4680  * The order of allocation has a significant impact on performance and other
4681  * system components. Generally order 0 allocations should be preferred since
4682  * order 0 does not cause fragmentation in the page allocator. Larger objects
4683  * can be problematic to put into order 0 slabs because there may be too much
4684  * unused space left. We go to a higher order if more than 1/16th of the slab
4685  * would be wasted.
4686  *
4687  * In order to reach satisfactory performance we must ensure that a minimum
4688  * number of objects is in one slab. Otherwise we may generate too much
4689  * activity on the partial lists which requires taking the list_lock. This is
4690  * less a concern for large slabs though which are rarely used.
4691  *
4692  * slab_max_order specifies the order where we begin to stop considering the
4693  * number of objects in a slab as critical. If we reach slab_max_order then
4694  * we try to keep the page order as low as possible. So we accept more waste
4695  * of space in favor of a small page order.
4696  *
4697  * Higher order allocations also allow the placement of more objects in a
4698  * slab and thereby reduce object handling overhead. If the user has
4699  * requested a higher minimum order then we start with that one instead of
4700  * the smallest order which will fit the object.
4701  */
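/*
 * Return the lowest order in [min_order, max_order] whose leftover space
 * (slab_size % size) is at most slab_size / fract_leftover; if no order
 * qualifies, max_order + 1 is returned and the caller rejects it.
 *
 * A hypothetical example, assuming 4K pages and ignoring the min_objects
 * constraint: for a 700-byte object at fract_leftover = 16, order 0 wastes
 * 4096 % 700 = 596 bytes (> 4096/16 = 256) and is rejected, while order 1
 * wastes 8192 % 700 = 492 bytes (<= 8192/16 = 512) and is chosen.
 */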
4702 static inline unsigned int calc_slab_order(unsigned int size,
4703 		unsigned int min_order, unsigned int max_order,
4704 		unsigned int fract_leftover)
4705 {
4706 	unsigned int order;
4707 
4708 	for (order = min_order; order <= max_order; order++) {
4709 
4710 		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
4711 		unsigned int rem;
4712 
4713 		rem = slab_size % size;
4714 
4715 		if (rem <= slab_size / fract_leftover)
4716 			break;
4717 	}
4718 
4719 	return order;
4720 }
4721 
4722 static inline int calculate_order(unsigned int size)
4723 {
4724 	unsigned int order;
4725 	unsigned int min_objects;
4726 	unsigned int max_objects;
4727 	unsigned int min_order;
4728 
4729 	min_objects = slub_min_objects;
4730 	if (!min_objects) {
4731 		/*
4732 		 * Some architectures will only update present cpus when
4733 		 * onlining them, so don't trust the number if it's just 1. But
4734 		 * we also don't want to use nr_cpu_ids always, as on some other
4735 		 * architectures, there can be many possible cpus, but never
4736 		 * onlined. Here we compromise between trying to avoid too high
4737 		 * order on systems that appear larger than they are, and too
4738 		 * low order on systems that appear smaller than they are.
4739 		 */
4740 		unsigned int nr_cpus = num_present_cpus();
4741 		if (nr_cpus <= 1)
4742 			nr_cpus = nr_cpu_ids;
4743 		min_objects = 4 * (fls(nr_cpus) + 1);
4744 	}
4745 	/* min_objects can't be 0 because get_order(0) is undefined */
4746 	max_objects = max(order_objects(slub_max_order, size), 1U);
4747 	min_objects = min(min_objects, max_objects);
4748 
4749 	min_order = max_t(unsigned int, slub_min_order,
4750 			  get_order(min_objects * size));
4751 	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
4752 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
4753 
4754 	/*
4755 	 * Attempt to find the best configuration for a slab. This works by first
4756 	 * attempting to generate a layout with the best possible configuration
4757 	 * and backing off gradually.
4758 	 *
4759 	 * We start with accepting at most 1/16 waste and try to find the
4760 	 * smallest order from min_objects-derived/slab_min_order up to
4761 	 * slab_max_order that will satisfy the constraint. Note that increasing
4762 	 * the order can only result in the same or less fractional waste, not more.
4763 	 *
4764 	 * If that fails, we increase the acceptable fraction of waste and try
4765 	 * again. The last iteration with a fraction of 1/2 would effectively
4766 	 * accept any waste and give us the order determined by min_objects, as
4767 	 * long as at least a single object fits within slab_max_order.
4768 	 */
4769 	for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
4770 		order = calc_slab_order(size, min_order, slub_max_order,
4771 					fraction);
4772 		if (order <= slub_max_order)
4773 			return order;
4774 	}
4775 
4776 	/*
4777 	 * Doh this slab cannot be placed using slab_max_order.
4778 	 */
4779 	order = get_order(size);
4780 	if (order <= MAX_PAGE_ORDER)
4781 		return order;
4782 	return -ENOSYS;
4783 }
4784 
4785 static void
4786 init_kmem_cache_node(struct kmem_cache_node *n)
4787 {
4788 	n->nr_partial = 0;
4789 	spin_lock_init(&n->list_lock);
4790 	INIT_LIST_HEAD(&n->partial);
4791 #ifdef CONFIG_SLUB_DEBUG
4792 	atomic_long_set(&n->nr_slabs, 0);
4793 	atomic_long_set(&n->total_objects, 0);
4794 	INIT_LIST_HEAD(&n->full);
4795 #endif
4796 }
4797 
4798 #ifndef CONFIG_SLUB_TINY
4799 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4800 {
4801 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
4802 			NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
4803 			sizeof(struct kmem_cache_cpu));
4804 
4805 	/*
4806 	 * Must align to double word boundary for the double cmpxchg
4807 	 * instructions to work; see __pcpu_double_call_return_bool().
4808 	 */
4809 	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
4810 				     2 * sizeof(void *));
4811 
4812 	if (!s->cpu_slab)
4813 		return 0;
4814 
4815 	init_kmem_cache_cpus(s);
4816 
4817 	return 1;
4818 }
4819 #else
4820 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4821 {
4822 	return 1;
4823 }
4824 #endif /* CONFIG_SLUB_TINY */
4825 
4826 static struct kmem_cache *kmem_cache_node;
4827 
4828 /*
4829  * No kmalloc_node yet so do it by hand. We know that this is the first
4830  * slab on the node for this slabcache. There are no concurrent accesses
4831  * possible.
4832  *
4833  * Note that this function only works on the kmem_cache_node
4834  * when allocating for the kmem_cache_node. This is used for bootstrapping
4835  * memory on a fresh node that has no slab structures yet.
4836  */
4837 static void early_kmem_cache_node_alloc(int node)
4838 {
4839 	struct slab *slab;
4840 	struct kmem_cache_node *n;
4841 
4842 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
4843 
4844 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
4845 
4846 	BUG_ON(!slab);
4847 	if (slab_nid(slab) != node) {
4848 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
4849 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
4850 	}
4851 
4852 	n = slab->freelist;
4853 	BUG_ON(!n);
4854 #ifdef CONFIG_SLUB_DEBUG
4855 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
4856 	init_tracking(kmem_cache_node, n);
4857 #endif
4858 	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
4859 	slab->freelist = get_freepointer(kmem_cache_node, n);
4860 	slab->inuse = 1;
4861 	kmem_cache_node->node[node] = n;
4862 	init_kmem_cache_node(n);
4863 	inc_slabs_node(kmem_cache_node, node, slab->objects);
4864 
4865 	/*
4866 	 * No locks need to be taken here as it has just been
4867 	 * initialized and there is no concurrent access.
4868 	 */
4869 	__add_partial(n, slab, DEACTIVATE_TO_HEAD);
4870 }
4871 
4872 static void free_kmem_cache_nodes(struct kmem_cache *s)
4873 {
4874 	int node;
4875 	struct kmem_cache_node *n;
4876 
4877 	for_each_kmem_cache_node(s, node, n) {
4878 		s->node[node] = NULL;
4879 		kmem_cache_free(kmem_cache_node, n);
4880 	}
4881 }
4882 
4883 void __kmem_cache_release(struct kmem_cache *s)
4884 {
4885 	cache_random_seq_destroy(s);
4886 #ifndef CONFIG_SLUB_TINY
4887 	free_percpu(s->cpu_slab);
4888 #endif
4889 	free_kmem_cache_nodes(s);
4890 }
4891 
4892 static int init_kmem_cache_nodes(struct kmem_cache *s)
4893 {
4894 	int node;
4895 
4896 	for_each_node_mask(node, slab_nodes) {
4897 		struct kmem_cache_node *n;
4898 
4899 		if (slab_state == DOWN) {
4900 			early_kmem_cache_node_alloc(node);
4901 			continue;
4902 		}
4903 		n = kmem_cache_alloc_node(kmem_cache_node,
4904 						GFP_KERNEL, node);
4905 
4906 		if (!n) {
4907 			free_kmem_cache_nodes(s);
4908 			return 0;
4909 		}
4910 
4911 		init_kmem_cache_node(n);
4912 		s->node[node] = n;
4913 	}
4914 	return 1;
4915 }
4916 
4917 static void set_cpu_partial(struct kmem_cache *s)
4918 {
4919 #ifdef CONFIG_SLUB_CPU_PARTIAL
4920 	unsigned int nr_objects;
4921 
4922 	/*
4923 	 * cpu_partial determines the maximum number of objects kept in the
4924 	 * per cpu partial lists of a processor.
4925 	 *
4926 	 * Per cpu partial lists mainly contain slabs that just have one
4927 	 * object freed. If they are used for allocation then they can be
4928 	 * filled up again with minimal effort. The slab will never hit the
4929 	 * per node partial lists and therefore no locking will be required.
4930 	 *
4931 	 * For backwards compatibility reasons, this is determined as number
4932 	 * of objects, even though we now limit maximum number of pages, see
4933 	 * slub_set_cpu_partial()
4934 	 */
4935 	if (!kmem_cache_has_cpu_partial(s))
4936 		nr_objects = 0;
4937 	else if (s->size >= PAGE_SIZE)
4938 		nr_objects = 6;
4939 	else if (s->size >= 1024)
4940 		nr_objects = 24;
4941 	else if (s->size >= 256)
4942 		nr_objects = 52;
4943 	else
4944 		nr_objects = 120;
4945 
4946 	slub_set_cpu_partial(s, nr_objects);
4947 #endif
4948 }
4949 
4950 /*
4951  * calculate_sizes() determines the order and the distribution of data within
4952  * a slab object.
4953  */
4954 static int calculate_sizes(struct kmem_cache *s)
4955 {
4956 	slab_flags_t flags = s->flags;
4957 	unsigned int size = s->object_size;
4958 	unsigned int order;
4959 
4960 	/*
4961 	 * Round up object size to the next word boundary. We can only
4962 	 * place the free pointer at word boundaries and this determines
4963 	 * the possible location of the free pointer.
4964 	 */
4965 	size = ALIGN(size, sizeof(void *));
4966 
4967 #ifdef CONFIG_SLUB_DEBUG
4968 	/*
4969 	 * Determine if we can poison the object itself. If the user of
4970 	 * the slab may touch the object after free or before allocation
4971 	 * then we should never poison the object itself.
4972 	 */
4973 	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
4974 			!s->ctor)
4975 		s->flags |= __OBJECT_POISON;
4976 	else
4977 		s->flags &= ~__OBJECT_POISON;
4978 
4979 
4980 	/*
4981 	 * If we are Redzoning then check if there is some space between the
4982 	 * end of the object and the free pointer. If not then add an
4983 	 * additional word to have some bytes to store Redzone information.
4984 	 */
4985 	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
4986 		size += sizeof(void *);
4987 #endif
4988 
4989 	/*
4990 	 * With that we have determined the number of bytes in actual use
4991 	 * by the object and redzoning.
4992 	 */
4993 	s->inuse = size;
4994 
4995 	if (slub_debug_orig_size(s) ||
4996 	    (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
4997 	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
4998 	    s->ctor) {
4999 		/*
5000 		 * Relocate free pointer after the object if it is not
5001 		 * permitted to overwrite the first word of the object on
5002 		 * kmem_cache_free.
5003 		 *
5004 		 * This is the case if we do RCU, have a constructor or
5005 		 * destructor, are poisoning the objects, or are
5006 		 * redzoning an object smaller than sizeof(void *).
5007 		 *
5008 		 * The assumption that s->offset >= s->inuse means free
5009 		 * pointer is outside of the object is used in the
5010 		 * freeptr_outside_object() function. If that is no
5011 		 * longer true, the function needs to be modified.
5012 		 */
5013 		s->offset = size;
5014 		size += sizeof(void *);
5015 	} else {
5016 		/*
5017 		 * Store freelist pointer near middle of object to keep
5018 		 * it away from the edges of the object to avoid small
5019 		 * sized over/underflows from neighboring allocations.
5020 		 */
5021 		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
5022 	}
5023 
5024 #ifdef CONFIG_SLUB_DEBUG
5025 	if (flags & SLAB_STORE_USER) {
5026 		/*
5027 		 * Need to store information about allocs and frees after
5028 		 * the object.
5029 		 */
5030 		size += 2 * sizeof(struct track);
5031 
5032 		/* Save the original kmalloc request size */
5033 		if (flags & SLAB_KMALLOC)
5034 			size += sizeof(unsigned int);
5035 	}
5036 #endif
5037 
5038 	kasan_cache_create(s, &size, &s->flags);
5039 #ifdef CONFIG_SLUB_DEBUG
5040 	if (flags & SLAB_RED_ZONE) {
5041 		/*
5042 		 * Add some empty padding so that we can catch
5043 		 * overwrites from earlier objects rather than let
5044 		 * tracking information or the free pointer be
5045 		 * corrupted if a user writes before the start
5046 		 * of the object.
5047 		 */
5048 		size += sizeof(void *);
5049 
5050 		s->red_left_pad = sizeof(void *);
5051 		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
5052 		size += s->red_left_pad;
5053 	}
5054 #endif
5055 
5056 	/*
5057 	 * SLUB stores one object immediately after another beginning from
5058 	 * offset 0. In order to align the objects we have to simply size
5059 	 * each object to conform to the alignment.
5060 	 */
5061 	size = ALIGN(size, s->align);
5062 	s->size = size;
5063 	s->reciprocal_size = reciprocal_value(size);
5064 	order = calculate_order(size);
5065 
5066 	if ((int)order < 0)
5067 		return 0;
5068 
5069 	s->allocflags = 0;
5070 	if (order)
5071 		s->allocflags |= __GFP_COMP;
5072 
5073 	if (s->flags & SLAB_CACHE_DMA)
5074 		s->allocflags |= GFP_DMA;
5075 
5076 	if (s->flags & SLAB_CACHE_DMA32)
5077 		s->allocflags |= GFP_DMA32;
5078 
5079 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5080 		s->allocflags |= __GFP_RECLAIMABLE;
5081 
5082 	/*
5083 	 * Determine the number of objects per slab
5084 	 */
5085 	s->oo = oo_make(order, size);
5086 	s->min = oo_make(get_order(size), size);
5087 
5088 	return !!oo_objects(s->oo);
5089 }
5090 
5091 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
5092 {
5093 	s->flags = kmem_cache_flags(flags, s->name);
5094 #ifdef CONFIG_SLAB_FREELIST_HARDENED
5095 	s->random = get_random_long();
5096 #endif
5097 
5098 	if (!calculate_sizes(s))
5099 		goto error;
5100 	if (disable_higher_order_debug) {
5101 		/*
5102 		 * Disable debugging flags that store metadata if the min slab
5103 		 * order increased.
5104 		 */
5105 		if (get_order(s->size) > get_order(s->object_size)) {
5106 			s->flags &= ~DEBUG_METADATA_FLAGS;
5107 			s->offset = 0;
5108 			if (!calculate_sizes(s))
5109 				goto error;
5110 		}
5111 	}
5112 
5113 #ifdef system_has_freelist_aba
5114 	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
5115 		/* Enable fast mode */
5116 		s->flags |= __CMPXCHG_DOUBLE;
5117 	}
5118 #endif
5119 
5120 	/*
5121 	 * The larger the object size is, the more slabs we want on the partial
5122 	 * list to avoid pounding the page allocator excessively.
5123 	 */
5124 	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
5125 	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
5126 
5127 	set_cpu_partial(s);
5128 
5129 #ifdef CONFIG_NUMA
5130 	s->remote_node_defrag_ratio = 1000;
5131 #endif
5132 
5133 	/* Initialize the pre-computed randomized freelist if slab is up */
5134 	if (slab_state >= UP) {
5135 		if (init_cache_random_seq(s))
5136 			goto error;
5137 	}
5138 
5139 	if (!init_kmem_cache_nodes(s))
5140 		goto error;
5141 
5142 	if (alloc_kmem_cache_cpus(s))
5143 		return 0;
5144 
5145 error:
5146 	__kmem_cache_release(s);
5147 	return -EINVAL;
5148 }
5149 
5150 static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
5151 			      const char *text)
5152 {
5153 #ifdef CONFIG_SLUB_DEBUG
5154 	void *addr = slab_address(slab);
5155 	void *p;
5156 
5157 	slab_err(s, slab, text, s->name);
5158 
5159 	spin_lock(&object_map_lock);
5160 	__fill_map(object_map, s, slab);
5161 
5162 	for_each_object(p, s, addr, slab->objects) {
5163 
5164 		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
5165 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
5166 			print_tracking(s, p);
5167 		}
5168 	}
5169 	spin_unlock(&object_map_lock);
5170 #endif
5171 }
5172 
5173 /*
5174  * Attempt to free all partial slabs on a node.
5175  * This is called from __kmem_cache_shutdown(). We must take list_lock
5176  * because sysfs files might still access the partial list during shutdown.
5177  */
5178 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
5179 {
5180 	LIST_HEAD(discard);
5181 	struct slab *slab, *h;
5182 
5183 	BUG_ON(irqs_disabled());
5184 	spin_lock_irq(&n->list_lock);
5185 	list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
5186 		if (!slab->inuse) {
5187 			remove_partial(n, slab);
5188 			list_add(&slab->slab_list, &discard);
5189 		} else {
5190 			list_slab_objects(s, slab,
5191 			  "Objects remaining in %s on __kmem_cache_shutdown()");
5192 		}
5193 	}
5194 	spin_unlock_irq(&n->list_lock);
5195 
5196 	list_for_each_entry_safe(slab, h, &discard, slab_list)
5197 		discard_slab(s, slab);
5198 }
5199 
5200 bool __kmem_cache_empty(struct kmem_cache *s)
5201 {
5202 	int node;
5203 	struct kmem_cache_node *n;
5204 
5205 	for_each_kmem_cache_node(s, node, n)
5206 		if (n->nr_partial || node_nr_slabs(n))
5207 			return false;
5208 	return true;
5209 }
5210 
5211 /*
5212  * Release all resources used by a slab cache.
5213  */
5214 int __kmem_cache_shutdown(struct kmem_cache *s)
5215 {
5216 	int node;
5217 	struct kmem_cache_node *n;
5218 
5219 	flush_all_cpus_locked(s);
5220 	/* Attempt to free all objects */
5221 	for_each_kmem_cache_node(s, node, n) {
5222 		free_partial(s, n);
5223 		if (n->nr_partial || node_nr_slabs(n))
5224 			return 1;
5225 	}
5226 	return 0;
5227 }
5228 
5229 #ifdef CONFIG_PRINTK
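/*
 * Fill in kmem_obj_info for kmem_dump_obj(): the containing slab and cache,
 * the object's start address and, if SLAB_STORE_USER is set, the recorded
 * alloc/free call sites and stack traces.
 */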
5230 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
5231 {
5232 	void *base;
5233 	int __maybe_unused i;
5234 	unsigned int objnr;
5235 	void *objp;
5236 	void *objp0;
5237 	struct kmem_cache *s = slab->slab_cache;
5238 	struct track __maybe_unused *trackp;
5239 
5240 	kpp->kp_ptr = object;
5241 	kpp->kp_slab = slab;
5242 	kpp->kp_slab_cache = s;
5243 	base = slab_address(slab);
5244 	objp0 = kasan_reset_tag(object);
5245 #ifdef CONFIG_SLUB_DEBUG
5246 	objp = restore_red_left(s, objp0);
5247 #else
5248 	objp = objp0;
5249 #endif
5250 	objnr = obj_to_index(s, slab, objp);
5251 	kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
5252 	objp = base + s->size * objnr;
5253 	kpp->kp_objp = objp;
5254 	if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
5255 			 || (objp - base) % s->size) ||
5256 	    !(s->flags & SLAB_STORE_USER))
5257 		return;
5258 #ifdef CONFIG_SLUB_DEBUG
5259 	objp = fixup_red_left(s, objp);
5260 	trackp = get_track(s, objp, TRACK_ALLOC);
5261 	kpp->kp_ret = (void *)trackp->addr;
5262 #ifdef CONFIG_STACKDEPOT
5263 	{
5264 		depot_stack_handle_t handle;
5265 		unsigned long *entries;
5266 		unsigned int nr_entries;
5267 
5268 		handle = READ_ONCE(trackp->handle);
5269 		if (handle) {
5270 			nr_entries = stack_depot_fetch(handle, &entries);
5271 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5272 				kpp->kp_stack[i] = (void *)entries[i];
5273 		}
5274 
5275 		trackp = get_track(s, objp, TRACK_FREE);
5276 		handle = READ_ONCE(trackp->handle);
5277 		if (handle) {
5278 			nr_entries = stack_depot_fetch(handle, &entries);
5279 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5280 				kpp->kp_free_stack[i] = (void *)entries[i];
5281 		}
5282 	}
5283 #endif
5284 #endif
5285 }
5286 #endif
5287 
5288 /********************************************************************
5289  *		Kmalloc subsystem
5290  *******************************************************************/
5291 
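/*
 * Parse the slab_min_order= (and legacy slub_min_order=) boot parameter,
 * keeping slub_min_order <= slub_max_order.
 */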
5292 static int __init setup_slub_min_order(char *str)
5293 {
5294 	get_option(&str, (int *)&slub_min_order);
5295 
5296 	if (slub_min_order > slub_max_order)
5297 		slub_max_order = slub_min_order;
5298 
5299 	return 1;
5300 }
5301 
5302 __setup("slab_min_order=", setup_slub_min_order);
5303 __setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0);
5304 
5305 
5306 static int __init setup_slub_max_order(char *str)
5307 {
5308 	get_option(&str, (int *)&slub_max_order);
5309 	slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
5310 
5311 	if (slub_min_order > slub_max_order)
5312 		slub_min_order = slub_max_order;
5313 
5314 	return 1;
5315 }
5316 
5317 __setup("slab_max_order=", setup_slub_max_order);
5318 __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0);
5319 
5320 static int __init setup_slub_min_objects(char *str)
5321 {
5322 	get_option(&str, (int *)&slub_min_objects);
5323 
5324 	return 1;
5325 }
5326 
5327 __setup("slab_min_objects=", setup_slub_min_objects);
5328 __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
5329 
5330 #ifdef CONFIG_HARDENED_USERCOPY
5331 /*
5332  * Rejects incorrectly sized objects and objects that are to be copied
5333  * to/from userspace but do not fall entirely within the containing slab
5334  * cache's usercopy region.
5335  *
5336  * Aborts via usercopy_abort() if the check fails; returns normally if it
5337  * passes.
5338  */
5339 void __check_heap_object(const void *ptr, unsigned long n,
5340 			 const struct slab *slab, bool to_user)
5341 {
5342 	struct kmem_cache *s;
5343 	unsigned int offset;
5344 	bool is_kfence = is_kfence_address(ptr);
5345 
5346 	ptr = kasan_reset_tag(ptr);
5347 
5348 	/* Find object and usable object size. */
5349 	s = slab->slab_cache;
5350 
5351 	/* Reject impossible pointers. */
5352 	if (ptr < slab_address(slab))
5353 		usercopy_abort("SLUB object not in SLUB page?!", NULL,
5354 			       to_user, 0, n);
5355 
5356 	/* Find offset within object. */
5357 	if (is_kfence)
5358 		offset = ptr - kfence_object_start(ptr);
5359 	else
5360 		offset = (ptr - slab_address(slab)) % s->size;
5361 
5362 	/* Adjust for redzone and reject if within the redzone. */
5363 	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
5364 		if (offset < s->red_left_pad)
5365 			usercopy_abort("SLUB object in left red zone",
5366 				       s->name, to_user, offset, n);
5367 		offset -= s->red_left_pad;
5368 	}
5369 
5370 	/* Allow address range falling entirely within usercopy region. */
5371 	if (offset >= s->useroffset &&
5372 	    offset - s->useroffset <= s->usersize &&
5373 	    n <= s->useroffset - offset + s->usersize)
5374 		return;
5375 
5376 	usercopy_abort("SLUB object", s->name, to_user, offset, n);
5377 }
5378 #endif /* CONFIG_HARDENED_USERCOPY */
5379 
5380 #define SHRINK_PROMOTE_MAX 32
5381 
5382 /*
5383  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
5384  * up most to the head of the partial lists. New allocations will then
5385  * fill those up and thus they can be removed from the partial lists.
5386  *
5387  * The slabs with the least items are placed last. This results in them
5388  * being allocated from last, increasing the chance that the last objects
5389  * are freed in them.
5390  */
5391 static int __kmem_cache_do_shrink(struct kmem_cache *s)
5392 {
5393 	int node;
5394 	int i;
5395 	struct kmem_cache_node *n;
5396 	struct slab *slab;
5397 	struct slab *t;
5398 	struct list_head discard;
5399 	struct list_head promote[SHRINK_PROMOTE_MAX];
5400 	unsigned long flags;
5401 	int ret = 0;
5402 
5403 	for_each_kmem_cache_node(s, node, n) {
5404 		INIT_LIST_HEAD(&discard);
5405 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
5406 			INIT_LIST_HEAD(promote + i);
5407 
5408 		spin_lock_irqsave(&n->list_lock, flags);
5409 
5410 		/*
5411 		 * Build lists of slabs to discard or promote.
5412 		 *
5413 		 * Note that concurrent frees may occur while we hold the
5414 		 * list_lock. slab->inuse here is the upper limit.
5415 		 */
5416 		list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
5417 			int free = slab->objects - slab->inuse;
5418 
5419 			/* Do not reread slab->inuse */
5420 			barrier();
5421 
5422 			/* We do not keep full slabs on the list */
5423 			BUG_ON(free <= 0);
5424 
5425 			if (free == slab->objects) {
5426 				list_move(&slab->slab_list, &discard);
5427 				slab_clear_node_partial(slab);
5428 				n->nr_partial--;
5429 				dec_slabs_node(s, node, slab->objects);
5430 			} else if (free <= SHRINK_PROMOTE_MAX)
5431 				list_move(&slab->slab_list, promote + free - 1);
5432 		}
5433 
5434 		/*
5435 		 * Promote the slabs filled up most to the head of the
5436 		 * partial list.
5437 		 */
5438 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
5439 			list_splice(promote + i, &n->partial);
5440 
5441 		spin_unlock_irqrestore(&n->list_lock, flags);
5442 
5443 		/* Release empty slabs */
5444 		list_for_each_entry_safe(slab, t, &discard, slab_list)
5445 			free_slab(s, slab);
5446 
5447 		if (node_nr_slabs(n))
5448 			ret = 1;
5449 	}
5450 
5451 	return ret;
5452 }
5453 
5454 int __kmem_cache_shrink(struct kmem_cache *s)
5455 {
5456 	flush_all(s);
5457 	return __kmem_cache_do_shrink(s);
5458 }
5459 
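/*
 * Memory is about to go offline: flush all per-cpu slabs and shrink every
 * cache so that empty slabs on the departing node can be released.
 */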
5460 static int slab_mem_going_offline_callback(void *arg)
5461 {
5462 	struct kmem_cache *s;
5463 
5464 	mutex_lock(&slab_mutex);
5465 	list_for_each_entry(s, &slab_caches, list) {
5466 		flush_all_cpus_locked(s);
5467 		__kmem_cache_do_shrink(s);
5468 	}
5469 	mutex_unlock(&slab_mutex);
5470 
5471 	return 0;
5472 }
5473 
5474 static void slab_mem_offline_callback(void *arg)
5475 {
5476 	struct memory_notify *marg = arg;
5477 	int offline_node;
5478 
5479 	offline_node = marg->status_change_nid_normal;
5480 
5481 	/*
5482 	 * If the node still has available memory, we still need its
5483 	 * kmem_cache_node, so there is nothing to do.
5484 	 */
5485 	if (offline_node < 0)
5486 		return;
5487 
5488 	mutex_lock(&slab_mutex);
5489 	node_clear(offline_node, slab_nodes);
5490 	/*
5491 	 * We no longer free kmem_cache_node structures here, as it would be
5492 	 * racy with all get_node() users, and infeasible to protect them with
5493 	 * slab_mutex.
5494 	 */
5495 	mutex_unlock(&slab_mutex);
5496 }
5497 
5498 static int slab_mem_going_online_callback(void *arg)
5499 {
5500 	struct kmem_cache_node *n;
5501 	struct kmem_cache *s;
5502 	struct memory_notify *marg = arg;
5503 	int nid = marg->status_change_nid_normal;
5504 	int ret = 0;
5505 
5506 	/*
5507 	 * If the node's memory is already available, then kmem_cache_node is
5508 	 * already created. Nothing to do.
5509 	 */
5510 	if (nid < 0)
5511 		return 0;
5512 
5513 	/*
5514 	 * We are bringing a node online. No memory is available yet. We must
5515 	 * allocate a kmem_cache_node structure in order to bring the node
5516 	 * online.
5517 	 */
5518 	mutex_lock(&slab_mutex);
5519 	list_for_each_entry(s, &slab_caches, list) {
5520 		/*
5521 		 * The structure may already exist if the node was previously
5522 		 * onlined and offlined.
5523 		 */
5524 		if (get_node(s, nid))
5525 			continue;
5526 		/*
5527 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
5528 		 *      since memory is not yet available from the node that
5529 		 *      is being brought up.
5530 		 */
5531 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
5532 		if (!n) {
5533 			ret = -ENOMEM;
5534 			goto out;
5535 		}
5536 		init_kmem_cache_node(n);
5537 		s->node[nid] = n;
5538 	}
5539 	/*
5540 	 * Any cache created after this point will also have kmem_cache_node
5541 	 * initialized for the new node.
5542 	 */
5543 	node_set(nid, slab_nodes);
5544 out:
5545 	mutex_unlock(&slab_mutex);
5546 	return ret;
5547 }
5548 
5549 static int slab_memory_callback(struct notifier_block *self,
5550 				unsigned long action, void *arg)
5551 {
5552 	int ret = 0;
5553 
5554 	switch (action) {
5555 	case MEM_GOING_ONLINE:
5556 		ret = slab_mem_going_online_callback(arg);
5557 		break;
5558 	case MEM_GOING_OFFLINE:
5559 		ret = slab_mem_going_offline_callback(arg);
5560 		break;
5561 	case MEM_OFFLINE:
5562 	case MEM_CANCEL_ONLINE:
5563 		slab_mem_offline_callback(arg);
5564 		break;
5565 	case MEM_ONLINE:
5566 	case MEM_CANCEL_OFFLINE:
5567 		break;
5568 	}
5569 	if (ret)
5570 		ret = notifier_from_errno(ret);
5571 	else
5572 		ret = NOTIFY_OK;
5573 	return ret;
5574 }
5575 
5576 /********************************************************************
5577  *			Basic setup of slabs
5578  *******************************************************************/
5579 
5580 /*
5581  * Used for early kmem_cache structures that were allocated using
5582  * the page allocator. Allocate them properly then fix up the pointers
5583  * that may be pointing to the wrong kmem_cache structure.
5584  */
5585 
5586 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
5587 {
5588 	int node;
5589 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
5590 	struct kmem_cache_node *n;
5591 
5592 	memcpy(s, static_cache, kmem_cache->object_size);
5593 
5594 	/*
5595 	 * This runs very early, and only the boot processor is supposed to be
5596 	 * up.  Even if that were not the case, IRQs are not enabled yet, so we
5597 	 * could not fire IPIs around.
5598 	 */
5599 	__flush_cpu_slab(s, smp_processor_id());
5600 	for_each_kmem_cache_node(s, node, n) {
5601 		struct slab *p;
5602 
5603 		list_for_each_entry(p, &n->partial, slab_list)
5604 			p->slab_cache = s;
5605 
5606 #ifdef CONFIG_SLUB_DEBUG
5607 		list_for_each_entry(p, &n->full, slab_list)
5608 			p->slab_cache = s;
5609 #endif
5610 	}
5611 	list_add(&s->list, &slab_caches);
5612 	return s;
5613 }
5614 
5615 void __init kmem_cache_init(void)
5616 {
5617 	static __initdata struct kmem_cache boot_kmem_cache,
5618 		boot_kmem_cache_node;
5619 	int node;
5620 
5621 	if (debug_guardpage_minorder())
5622 		slub_max_order = 0;
5623 
5624 	/* Print slub debugging pointers without hashing */
5625 	if (__slub_debug_enabled())
5626 		no_hash_pointers_enable(NULL);
5627 
5628 	kmem_cache_node = &boot_kmem_cache_node;
5629 	kmem_cache = &boot_kmem_cache;
5630 
5631 	/*
5632 	 * Initialize the nodemask for which we will allocate per node
5633 	 * structures. There is no need to take slab_mutex here yet.
5634 	 */
5635 	for_each_node_state(node, N_NORMAL_MEMORY)
5636 		node_set(node, slab_nodes);
5637 
5638 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
5639 		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
5640 
5641 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
5642 
5643 	/* Able to allocate the per node structures */
5644 	slab_state = PARTIAL;
5645 
5646 	create_boot_cache(kmem_cache, "kmem_cache",
5647 			offsetof(struct kmem_cache, node) +
5648 				nr_node_ids * sizeof(struct kmem_cache_node *),
5649 		       SLAB_HWCACHE_ALIGN, 0, 0);
5650 
5651 	kmem_cache = bootstrap(&boot_kmem_cache);
5652 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
5653 
5654 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
5655 	setup_kmalloc_cache_index_table();
5656 	create_kmalloc_caches();
5657 
5658 	/* Setup random freelists for each cache */
5659 	init_freelist_randomization();
5660 
5661 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
5662 				  slub_cpu_dead);
5663 
5664 	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
5665 		cache_line_size(),
5666 		slub_min_order, slub_max_order, slub_min_objects,
5667 		nr_cpu_ids, nr_node_ids);
5668 }
5669 
5670 void __init kmem_cache_init_late(void)
5671 {
5672 #ifndef CONFIG_SLUB_TINY
5673 	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
5674 	WARN_ON(!flushwq);
5675 #endif
5676 }
5677 
5678 struct kmem_cache *
5679 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
5680 		   slab_flags_t flags, void (*ctor)(void *))
5681 {
5682 	struct kmem_cache *s;
5683 
5684 	s = find_mergeable(size, align, flags, name, ctor);
5685 	if (s) {
5686 		if (sysfs_slab_alias(s, name))
5687 			return NULL;
5688 
5689 		s->refcount++;
5690 
5691 		/*
5692 		 * Adjust the object sizes so that we clear
5693 		 * the complete object on kzalloc.
5694 		 */
5695 		s->object_size = max(s->object_size, size);
5696 		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
5697 	}
5698 
5699 	return s;
5700 }
5701 
5702 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
5703 {
5704 	int err;
5705 
5706 	err = kmem_cache_open(s, flags);
5707 	if (err)
5708 		return err;
5709 
5710 	/* Mutex is not taken during early boot */
5711 	if (slab_state <= UP)
5712 		return 0;
5713 
5714 	err = sysfs_slab_add(s);
5715 	if (err) {
5716 		__kmem_cache_release(s);
5717 		return err;
5718 	}
5719 
5720 	if (s->flags & SLAB_STORE_USER)
5721 		debugfs_slab_add(s);
5722 
5723 	return 0;
5724 }
5725 
5726 #ifdef SLAB_SUPPORTS_SYSFS
5727 static int count_inuse(struct slab *slab)
5728 {
5729 	return slab->inuse;
5730 }
5731 
5732 static int count_total(struct slab *slab)
5733 {
5734 	return slab->objects;
5735 }
5736 #endif
5737 
5738 #ifdef CONFIG_SLUB_DEBUG
5739 static void validate_slab(struct kmem_cache *s, struct slab *slab,
5740 			  unsigned long *obj_map)
5741 {
5742 	void *p;
5743 	void *addr = slab_address(slab);
5744 
5745 	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
5746 		return;
5747 
5748 	/* Now we know that a valid freelist exists */
5749 	__fill_map(obj_map, s, slab);
5750 	for_each_object(p, s, addr, slab->objects) {
5751 		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
5752 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
5753 
5754 		if (!check_object(s, slab, p, val))
5755 			break;
5756 	}
5757 }
5758 
5759 static int validate_slab_node(struct kmem_cache *s,
5760 		struct kmem_cache_node *n, unsigned long *obj_map)
5761 {
5762 	unsigned long count = 0;
5763 	struct slab *slab;
5764 	unsigned long flags;
5765 
5766 	spin_lock_irqsave(&n->list_lock, flags);
5767 
5768 	list_for_each_entry(slab, &n->partial, slab_list) {
5769 		validate_slab(s, slab, obj_map);
5770 		count++;
5771 	}
5772 	if (count != n->nr_partial) {
5773 		pr_err("SLUB %s: %lu partial slabs counted but counter=%lu\n",
5774 		       s->name, count, n->nr_partial);
5775 		slab_add_kunit_errors();
5776 	}
5777 
5778 	if (!(s->flags & SLAB_STORE_USER))
5779 		goto out;
5780 
5781 	list_for_each_entry(slab, &n->full, slab_list) {
5782 		validate_slab(s, slab, obj_map);
5783 		count++;
5784 	}
5785 	if (count != node_nr_slabs(n)) {
5786 		pr_err("SLUB: %s %lu slabs counted but counter=%lu\n",
5787 		       s->name, count, node_nr_slabs(n));
5788 		slab_add_kunit_errors();
5789 	}
5790 
5791 out:
5792 	spin_unlock_irqrestore(&n->list_lock, flags);
5793 	return count;
5794 }
5795 
5796 long validate_slab_cache(struct kmem_cache *s)
5797 {
5798 	int node;
5799 	unsigned long count = 0;
5800 	struct kmem_cache_node *n;
5801 	unsigned long *obj_map;
5802 
5803 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
5804 	if (!obj_map)
5805 		return -ENOMEM;
5806 
5807 	flush_all(s);
5808 	for_each_kmem_cache_node(s, node, n)
5809 		count += validate_slab_node(s, n, obj_map);
5810 
5811 	bitmap_free(obj_map);
5812 
5813 	return count;
5814 }
5815 EXPORT_SYMBOL(validate_slab_cache);
5816 
5817 #ifdef CONFIG_DEBUG_FS
5818 /*
5819  * Generate lists of code addresses where slabcache objects are allocated
5820  * and freed.
5821  */
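/*
 * These lists are exposed through debugfs as
 * /sys/kernel/debug/slab/<cache>/alloc_traces and .../free_traces (see
 * debugfs_slab_add() below). A line of output might look like (values are
 * illustrative only):
 *
 *	    120 kmem_cache_alloc+0x1a2/0x2c0 age=302/4215/9123 pid=1-2310 cpus=0-7
 *
 * followed by the saved allocation or free stack when stack depot is enabled.
 */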
5822 
5823 struct location {
5824 	depot_stack_handle_t handle;
5825 	unsigned long count;
5826 	unsigned long addr;
5827 	unsigned long waste;
5828 	long long sum_time;
5829 	long min_time;
5830 	long max_time;
5831 	long min_pid;
5832 	long max_pid;
5833 	DECLARE_BITMAP(cpus, NR_CPUS);
5834 	nodemask_t nodes;
5835 };
5836 
5837 struct loc_track {
5838 	unsigned long max;
5839 	unsigned long count;
5840 	struct location *loc;
5841 	loff_t idx;
5842 };
5843 
5844 static struct dentry *slab_debugfs_root;
5845 
5846 static void free_loc_track(struct loc_track *t)
5847 {
5848 	if (t->max)
5849 		free_pages((unsigned long)t->loc,
5850 			get_order(sizeof(struct location) * t->max));
5851 }
5852 
5853 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
5854 {
5855 	struct location *l;
5856 	int order;
5857 
5858 	order = get_order(sizeof(struct location) * max);
5859 
5860 	l = (void *)__get_free_pages(flags, order);
5861 	if (!l)
5862 		return 0;
5863 
5864 	if (t->count) {
5865 		memcpy(l, t->loc, sizeof(struct location) * t->count);
5866 		free_loc_track(t);
5867 	}
5868 	t->max = max;
5869 	t->loc = l;
5870 	return 1;
5871 }
5872 
5873 static int add_location(struct loc_track *t, struct kmem_cache *s,
5874 				const struct track *track,
5875 				unsigned int orig_size)
5876 {
5877 	long start, end, pos;
5878 	struct location *l;
5879 	unsigned long caddr, chandle, cwaste;
5880 	unsigned long age = jiffies - track->when;
5881 	depot_stack_handle_t handle = 0;
5882 	unsigned int waste = s->object_size - orig_size;
5883 
5884 #ifdef CONFIG_STACKDEPOT
5885 	handle = READ_ONCE(track->handle);
5886 #endif
5887 	start = -1;
5888 	end = t->count;
5889 
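	/*
	 * Binary search for an existing entry. t->loc[] is kept sorted by
	 * the (addr, handle, waste) triple, so identical call sites with
	 * identical stacks and identical waste share a single entry.
	 */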
5890 	for ( ; ; ) {
5891 		pos = start + (end - start + 1) / 2;
5892 
5893 		/*
5894 		 * There is nothing at "end". If we end up there
5895 		 * we need to insert the new element before "end".
5896 		 */
5897 		if (pos == end)
5898 			break;
5899 
5900 		l = &t->loc[pos];
5901 		caddr = l->addr;
5902 		chandle = l->handle;
5903 		cwaste = l->waste;
5904 		if ((track->addr == caddr) && (handle == chandle) &&
5905 			(waste == cwaste)) {
5906 
5907 			l->count++;
5908 			if (track->when) {
5909 				l->sum_time += age;
5910 				if (age < l->min_time)
5911 					l->min_time = age;
5912 				if (age > l->max_time)
5913 					l->max_time = age;
5914 
5915 				if (track->pid < l->min_pid)
5916 					l->min_pid = track->pid;
5917 				if (track->pid > l->max_pid)
5918 					l->max_pid = track->pid;
5919 
5920 				cpumask_set_cpu(track->cpu,
5921 						to_cpumask(l->cpus));
5922 			}
5923 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
5924 			return 1;
5925 		}
5926 
5927 		if (track->addr < caddr)
5928 			end = pos;
5929 		else if (track->addr == caddr && handle < chandle)
5930 			end = pos;
5931 		else if (track->addr == caddr && handle == chandle &&
5932 				waste < cwaste)
5933 			end = pos;
5934 		else
5935 			start = pos;
5936 	}
5937 
5938 	/*
5939 	 * Not found. Insert new tracking element.
5940 	 */
5941 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
5942 		return 0;
5943 
5944 	l = t->loc + pos;
5945 	if (pos < t->count)
5946 		memmove(l + 1, l,
5947 			(t->count - pos) * sizeof(struct location));
5948 	t->count++;
5949 	l->count = 1;
5950 	l->addr = track->addr;
5951 	l->sum_time = age;
5952 	l->min_time = age;
5953 	l->max_time = age;
5954 	l->min_pid = track->pid;
5955 	l->max_pid = track->pid;
5956 	l->handle = handle;
5957 	l->waste = waste;
5958 	cpumask_clear(to_cpumask(l->cpus));
5959 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
5960 	nodes_clear(l->nodes);
5961 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
5962 	return 1;
5963 }
5964 
5965 static void process_slab(struct loc_track *t, struct kmem_cache *s,
5966 		struct slab *slab, enum track_item alloc,
5967 		unsigned long *obj_map)
5968 {
5969 	void *addr = slab_address(slab);
5970 	bool is_alloc = (alloc == TRACK_ALLOC);
5971 	void *p;
5972 
5973 	__fill_map(obj_map, s, slab);
5974 
5975 	for_each_object(p, s, addr, slab->objects)
5976 		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
5977 			add_location(t, s, get_track(s, p, alloc),
5978 				     is_alloc ? get_orig_size(s, p) :
5979 						s->object_size);
5980 }
5981 #endif  /* CONFIG_DEBUG_FS   */
5982 #endif	/* CONFIG_SLUB_DEBUG */
5983 
5984 #ifdef SLAB_SUPPORTS_SYSFS
5985 enum slab_stat_type {
5986 	SL_ALL,			/* All slabs */
5987 	SL_PARTIAL,		/* Only partially allocated slabs */
5988 	SL_CPU,			/* Only slabs used for cpu caches */
5989 	SL_OBJECTS,		/* Determine allocated objects not slabs */
5990 	SL_TOTAL		/* Determine object capacity not slabs */
5991 };
5992 
5993 #define SO_ALL		(1 << SL_ALL)
5994 #define SO_PARTIAL	(1 << SL_PARTIAL)
5995 #define SO_CPU		(1 << SL_CPU)
5996 #define SO_OBJECTS	(1 << SL_OBJECTS)
5997 #define SO_TOTAL	(1 << SL_TOTAL)
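
/*
 * The callers below combine these; e.g. objects_partial_show() passes
 * SO_PARTIAL|SO_OBJECTS to count allocated objects on the node partial
 * lists, while total_objects_show() passes SO_ALL|SO_TOTAL to report the
 * total object capacity of all slabs.
 */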
5998 
5999 static ssize_t show_slab_objects(struct kmem_cache *s,
6000 				 char *buf, unsigned long flags)
6001 {
6002 	unsigned long total = 0;
6003 	int node;
6004 	int x;
6005 	unsigned long *nodes;
6006 	int len = 0;
6007 
6008 	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
6009 	if (!nodes)
6010 		return -ENOMEM;
6011 
6012 	if (flags & SO_CPU) {
6013 		int cpu;
6014 
6015 		for_each_possible_cpu(cpu) {
6016 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
6017 							       cpu);
6018 			int node;
6019 			struct slab *slab;
6020 
6021 			slab = READ_ONCE(c->slab);
6022 			if (!slab)
6023 				continue;
6024 
6025 			node = slab_nid(slab);
6026 			if (flags & SO_TOTAL)
6027 				x = slab->objects;
6028 			else if (flags & SO_OBJECTS)
6029 				x = slab->inuse;
6030 			else
6031 				x = 1;
6032 
6033 			total += x;
6034 			nodes[node] += x;
6035 
6036 #ifdef CONFIG_SLUB_CPU_PARTIAL
6037 			slab = slub_percpu_partial_read_once(c);
6038 			if (slab) {
6039 				node = slab_nid(slab);
6040 				if (flags & SO_TOTAL)
6041 					WARN_ON_ONCE(1);
6042 				else if (flags & SO_OBJECTS)
6043 					WARN_ON_ONCE(1);
6044 				else
6045 					x = slab->slabs;
6046 				total += x;
6047 				nodes[node] += x;
6048 			}
6049 #endif
6050 		}
6051 	}
6052 
6053 	/*
6054 	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
6055 	 * already held, as that would conflict with an existing lock order:
6056 	 *
6057 	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
6058 	 *
6059 	 * We don't really need mem_hotplug_lock (to hold off
6060 	 * slab_mem_going_offline_callback) here because slab's memory hot
6061 	 * unplug code doesn't destroy the kmem_cache->node[] data.
6062 	 */
6063 
6064 #ifdef CONFIG_SLUB_DEBUG
6065 	if (flags & SO_ALL) {
6066 		struct kmem_cache_node *n;
6067 
6068 		for_each_kmem_cache_node(s, node, n) {
6069 
6070 			if (flags & SO_TOTAL)
6071 				x = node_nr_objs(n);
6072 			else if (flags & SO_OBJECTS)
6073 				x = node_nr_objs(n) - count_partial(n, count_free);
6074 			else
6075 				x = node_nr_slabs(n);
6076 			total += x;
6077 			nodes[node] += x;
6078 		}
6079 
6080 	} else
6081 #endif
6082 	if (flags & SO_PARTIAL) {
6083 		struct kmem_cache_node *n;
6084 
6085 		for_each_kmem_cache_node(s, node, n) {
6086 			if (flags & SO_TOTAL)
6087 				x = count_partial(n, count_total);
6088 			else if (flags & SO_OBJECTS)
6089 				x = count_partial(n, count_inuse);
6090 			else
6091 				x = n->nr_partial;
6092 			total += x;
6093 			nodes[node] += x;
6094 		}
6095 	}
6096 
6097 	len += sysfs_emit_at(buf, len, "%lu", total);
6098 #ifdef CONFIG_NUMA
6099 	for (node = 0; node < nr_node_ids; node++) {
6100 		if (nodes[node])
6101 			len += sysfs_emit_at(buf, len, " N%d=%lu",
6102 					     node, nodes[node]);
6103 	}
6104 #endif
6105 	len += sysfs_emit_at(buf, len, "\n");
6106 	kfree(nodes);
6107 
6108 	return len;
6109 }
6110 
6111 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
6112 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
6113 
6114 struct slab_attribute {
6115 	struct attribute attr;
6116 	ssize_t (*show)(struct kmem_cache *s, char *buf);
6117 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
6118 };
6119 
6120 #define SLAB_ATTR_RO(_name) \
6121 	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
6122 
6123 #define SLAB_ATTR(_name) \
6124 	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
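
/*
 * SLAB_ATTR_RO(foo) thus defines foo_attr backed by foo_show(), and
 * SLAB_ATTR(foo) additionally wires up foo_store(). Once sysfs_slab_add()
 * has run, each attribute is visible as /sys/kernel/slab/<cache>/foo.
 */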
6125 
6126 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
6127 {
6128 	return sysfs_emit(buf, "%u\n", s->size);
6129 }
6130 SLAB_ATTR_RO(slab_size);
6131 
6132 static ssize_t align_show(struct kmem_cache *s, char *buf)
6133 {
6134 	return sysfs_emit(buf, "%u\n", s->align);
6135 }
6136 SLAB_ATTR_RO(align);
6137 
6138 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
6139 {
6140 	return sysfs_emit(buf, "%u\n", s->object_size);
6141 }
6142 SLAB_ATTR_RO(object_size);
6143 
6144 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
6145 {
6146 	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
6147 }
6148 SLAB_ATTR_RO(objs_per_slab);
6149 
6150 static ssize_t order_show(struct kmem_cache *s, char *buf)
6151 {
6152 	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
6153 }
6154 SLAB_ATTR_RO(order);
6155 
6156 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
6157 {
6158 	return sysfs_emit(buf, "%lu\n", s->min_partial);
6159 }
6160 
6161 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
6162 				 size_t length)
6163 {
6164 	unsigned long min;
6165 	int err;
6166 
6167 	err = kstrtoul(buf, 10, &min);
6168 	if (err)
6169 		return err;
6170 
6171 	s->min_partial = min;
6172 	return length;
6173 }
6174 SLAB_ATTR(min_partial);
6175 
6176 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
6177 {
6178 	unsigned int nr_partial = 0;
6179 #ifdef CONFIG_SLUB_CPU_PARTIAL
6180 	nr_partial = s->cpu_partial;
6181 #endif
6182 
6183 	return sysfs_emit(buf, "%u\n", nr_partial);
6184 }
6185 
6186 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
6187 				 size_t length)
6188 {
6189 	unsigned int objects;
6190 	int err;
6191 
6192 	err = kstrtouint(buf, 10, &objects);
6193 	if (err)
6194 		return err;
6195 	if (objects && !kmem_cache_has_cpu_partial(s))
6196 		return -EINVAL;
6197 
6198 	slub_set_cpu_partial(s, objects);
6199 	flush_all(s);
6200 	return length;
6201 }
6202 SLAB_ATTR(cpu_partial);
6203 
6204 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
6205 {
6206 	if (!s->ctor)
6207 		return 0;
6208 	return sysfs_emit(buf, "%pS\n", s->ctor);
6209 }
6210 SLAB_ATTR_RO(ctor);
6211 
6212 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
6213 {
6214 	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
6215 }
6216 SLAB_ATTR_RO(aliases);
6217 
6218 static ssize_t partial_show(struct kmem_cache *s, char *buf)
6219 {
6220 	return show_slab_objects(s, buf, SO_PARTIAL);
6221 }
6222 SLAB_ATTR_RO(partial);
6223 
6224 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
6225 {
6226 	return show_slab_objects(s, buf, SO_CPU);
6227 }
6228 SLAB_ATTR_RO(cpu_slabs);
6229 
6230 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
6231 {
6232 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
6233 }
6234 SLAB_ATTR_RO(objects_partial);
6235 
6236 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
6237 {
6238 	int objects = 0;
6239 	int slabs = 0;
6240 	int cpu __maybe_unused;
6241 	int len = 0;
6242 
6243 #ifdef CONFIG_SLUB_CPU_PARTIAL
6244 	for_each_online_cpu(cpu) {
6245 		struct slab *slab;
6246 
6247 		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6248 
6249 		if (slab)
6250 			slabs += slab->slabs;
6251 	}
6252 #endif
6253 
6254 	/* Approximate half-full slabs, see slub_set_cpu_partial() */
6255 	objects = (slabs * oo_objects(s->oo)) / 2;
6256 	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
6257 
6258 #ifdef CONFIG_SLUB_CPU_PARTIAL
6259 	for_each_online_cpu(cpu) {
6260 		struct slab *slab;
6261 
6262 		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6263 		if (slab) {
6264 			slabs = READ_ONCE(slab->slabs);
6265 			objects = (slabs * oo_objects(s->oo)) / 2;
6266 			len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
6267 					     cpu, objects, slabs);
6268 		}
6269 	}
6270 #endif
6271 	len += sysfs_emit_at(buf, len, "\n");
6272 
6273 	return len;
6274 }
6275 SLAB_ATTR_RO(slabs_cpu_partial);
6276 
6277 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
6278 {
6279 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
6280 }
6281 SLAB_ATTR_RO(reclaim_account);
6282 
6283 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
6284 {
6285 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
6286 }
6287 SLAB_ATTR_RO(hwcache_align);
6288 
6289 #ifdef CONFIG_ZONE_DMA
6290 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
6291 {
6292 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
6293 }
6294 SLAB_ATTR_RO(cache_dma);
6295 #endif
6296 
6297 #ifdef CONFIG_HARDENED_USERCOPY
6298 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
6299 {
6300 	return sysfs_emit(buf, "%u\n", s->usersize);
6301 }
6302 SLAB_ATTR_RO(usersize);
6303 #endif
6304 
6305 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
6306 {
6307 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
6308 }
6309 SLAB_ATTR_RO(destroy_by_rcu);
6310 
6311 #ifdef CONFIG_SLUB_DEBUG
6312 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
6313 {
6314 	return show_slab_objects(s, buf, SO_ALL);
6315 }
6316 SLAB_ATTR_RO(slabs);
6317 
6318 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
6319 {
6320 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
6321 }
6322 SLAB_ATTR_RO(total_objects);
6323 
6324 static ssize_t objects_show(struct kmem_cache *s, char *buf)
6325 {
6326 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
6327 }
6328 SLAB_ATTR_RO(objects);
6329 
6330 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
6331 {
6332 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
6333 }
6334 SLAB_ATTR_RO(sanity_checks);
6335 
6336 static ssize_t trace_show(struct kmem_cache *s, char *buf)
6337 {
6338 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
6339 }
6340 SLAB_ATTR_RO(trace);
6341 
6342 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
6343 {
6344 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
6345 }
6346 
6347 SLAB_ATTR_RO(red_zone);
6348 
6349 static ssize_t poison_show(struct kmem_cache *s, char *buf)
6350 {
6351 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
6352 }
6353 
6354 SLAB_ATTR_RO(poison);
6355 
6356 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
6357 {
6358 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
6359 }
6360 
6361 SLAB_ATTR_RO(store_user);
6362 
6363 static ssize_t validate_show(struct kmem_cache *s, char *buf)
6364 {
6365 	return 0;
6366 }
6367 
6368 static ssize_t validate_store(struct kmem_cache *s,
6369 			const char *buf, size_t length)
6370 {
6371 	int ret = -EINVAL;
6372 
6373 	if (buf[0] == '1' && kmem_cache_debug(s)) {
6374 		ret = validate_slab_cache(s);
6375 		if (ret >= 0)
6376 			ret = length;
6377 	}
6378 	return ret;
6379 }
6380 SLAB_ATTR(validate);
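
/*
 * Writing '1' to /sys/kernel/slab/<cache>/validate therefore walks every
 * partial (and, with SLAB_STORE_USER, full) slab of a debug-enabled cache
 * and reports any consistency errors to the kernel log.
 */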
6381 
6382 #endif /* CONFIG_SLUB_DEBUG */
6383 
6384 #ifdef CONFIG_FAILSLAB
6385 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
6386 {
6387 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
6388 }
6389 
6390 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
6391 				size_t length)
6392 {
6393 	if (s->refcount > 1)
6394 		return -EINVAL;
6395 
6396 	if (buf[0] == '1')
6397 		WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
6398 	else
6399 		WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
6400 
6401 	return length;
6402 }
6403 SLAB_ATTR(failslab);
6404 #endif
6405 
6406 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
6407 {
6408 	return 0;
6409 }
6410 
6411 static ssize_t shrink_store(struct kmem_cache *s,
6412 			const char *buf, size_t length)
6413 {
6414 	if (buf[0] == '1')
6415 		kmem_cache_shrink(s);
6416 	else
6417 		return -EINVAL;
6418 	return length;
6419 }
6420 SLAB_ATTR(shrink);
6421 
6422 #ifdef CONFIG_NUMA
6423 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
6424 {
6425 	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
6426 }
6427 
6428 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
6429 				const char *buf, size_t length)
6430 {
6431 	unsigned int ratio;
6432 	int err;
6433 
6434 	err = kstrtouint(buf, 10, &ratio);
6435 	if (err)
6436 		return err;
6437 	if (ratio > 100)
6438 		return -ERANGE;
6439 
6440 	s->remote_node_defrag_ratio = ratio * 10;
6441 
6442 	return length;
6443 }
6444 SLAB_ATTR(remote_node_defrag_ratio);
6445 #endif
6446 
6447 #ifdef CONFIG_SLUB_STATS
6448 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
6449 {
6450 	unsigned long sum  = 0;
6451 	int cpu;
6452 	int len = 0;
6453 	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
6454 
6455 	if (!data)
6456 		return -ENOMEM;
6457 
6458 	for_each_online_cpu(cpu) {
6459 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
6460 
6461 		data[cpu] = x;
6462 		sum += x;
6463 	}
6464 
6465 	len += sysfs_emit_at(buf, len, "%lu", sum);
6466 
6467 #ifdef CONFIG_SMP
6468 	for_each_online_cpu(cpu) {
6469 		if (data[cpu])
6470 			len += sysfs_emit_at(buf, len, " C%d=%u",
6471 					     cpu, data[cpu]);
6472 	}
6473 #endif
6474 	kfree(data);
6475 	len += sysfs_emit_at(buf, len, "\n");
6476 
6477 	return len;
6478 }
6479 
6480 static void clear_stat(struct kmem_cache *s, enum stat_item si)
6481 {
6482 	int cpu;
6483 
6484 	for_each_online_cpu(cpu)
6485 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
6486 }
6487 
6488 #define STAT_ATTR(si, text) 					\
6489 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
6490 {								\
6491 	return show_stat(s, buf, si);				\
6492 }								\
6493 static ssize_t text##_store(struct kmem_cache *s,		\
6494 				const char *buf, size_t length)	\
6495 {								\
6496 	if (buf[0] != '0')					\
6497 		return -EINVAL;					\
6498 	clear_stat(s, si);					\
6499 	return length;						\
6500 }								\
6501 SLAB_ATTR(text);						\
6502 
6503 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
6504 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
6505 STAT_ATTR(FREE_FASTPATH, free_fastpath);
6506 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
6507 STAT_ATTR(FREE_FROZEN, free_frozen);
6508 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
6509 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
6510 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
6511 STAT_ATTR(ALLOC_SLAB, alloc_slab);
6512 STAT_ATTR(ALLOC_REFILL, alloc_refill);
6513 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
6514 STAT_ATTR(FREE_SLAB, free_slab);
6515 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
6516 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
6517 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
6518 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
6519 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
6520 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
6521 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
6522 STAT_ATTR(ORDER_FALLBACK, order_fallback);
6523 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
6524 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
6525 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
6526 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
6527 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
6528 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
6529 #endif	/* CONFIG_SLUB_STATS */
6530 
6531 #ifdef CONFIG_KFENCE
6532 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
6533 {
6534 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
6535 }
6536 
6537 static ssize_t skip_kfence_store(struct kmem_cache *s,
6538 			const char *buf, size_t length)
6539 {
6540 	int ret = length;
6541 
6542 	if (buf[0] == '0')
6543 		s->flags &= ~SLAB_SKIP_KFENCE;
6544 	else if (buf[0] == '1')
6545 		s->flags |= SLAB_SKIP_KFENCE;
6546 	else
6547 		ret = -EINVAL;
6548 
6549 	return ret;
6550 }
6551 SLAB_ATTR(skip_kfence);
6552 #endif
6553 
6554 static struct attribute *slab_attrs[] = {
6555 	&slab_size_attr.attr,
6556 	&object_size_attr.attr,
6557 	&objs_per_slab_attr.attr,
6558 	&order_attr.attr,
6559 	&min_partial_attr.attr,
6560 	&cpu_partial_attr.attr,
6561 	&objects_partial_attr.attr,
6562 	&partial_attr.attr,
6563 	&cpu_slabs_attr.attr,
6564 	&ctor_attr.attr,
6565 	&aliases_attr.attr,
6566 	&align_attr.attr,
6567 	&hwcache_align_attr.attr,
6568 	&reclaim_account_attr.attr,
6569 	&destroy_by_rcu_attr.attr,
6570 	&shrink_attr.attr,
6571 	&slabs_cpu_partial_attr.attr,
6572 #ifdef CONFIG_SLUB_DEBUG
6573 	&total_objects_attr.attr,
6574 	&objects_attr.attr,
6575 	&slabs_attr.attr,
6576 	&sanity_checks_attr.attr,
6577 	&trace_attr.attr,
6578 	&red_zone_attr.attr,
6579 	&poison_attr.attr,
6580 	&store_user_attr.attr,
6581 	&validate_attr.attr,
6582 #endif
6583 #ifdef CONFIG_ZONE_DMA
6584 	&cache_dma_attr.attr,
6585 #endif
6586 #ifdef CONFIG_NUMA
6587 	&remote_node_defrag_ratio_attr.attr,
6588 #endif
6589 #ifdef CONFIG_SLUB_STATS
6590 	&alloc_fastpath_attr.attr,
6591 	&alloc_slowpath_attr.attr,
6592 	&free_fastpath_attr.attr,
6593 	&free_slowpath_attr.attr,
6594 	&free_frozen_attr.attr,
6595 	&free_add_partial_attr.attr,
6596 	&free_remove_partial_attr.attr,
6597 	&alloc_from_partial_attr.attr,
6598 	&alloc_slab_attr.attr,
6599 	&alloc_refill_attr.attr,
6600 	&alloc_node_mismatch_attr.attr,
6601 	&free_slab_attr.attr,
6602 	&cpuslab_flush_attr.attr,
6603 	&deactivate_full_attr.attr,
6604 	&deactivate_empty_attr.attr,
6605 	&deactivate_to_head_attr.attr,
6606 	&deactivate_to_tail_attr.attr,
6607 	&deactivate_remote_frees_attr.attr,
6608 	&deactivate_bypass_attr.attr,
6609 	&order_fallback_attr.attr,
6610 	&cmpxchg_double_fail_attr.attr,
6611 	&cmpxchg_double_cpu_fail_attr.attr,
6612 	&cpu_partial_alloc_attr.attr,
6613 	&cpu_partial_free_attr.attr,
6614 	&cpu_partial_node_attr.attr,
6615 	&cpu_partial_drain_attr.attr,
6616 #endif
6617 #ifdef CONFIG_FAILSLAB
6618 	&failslab_attr.attr,
6619 #endif
6620 #ifdef CONFIG_HARDENED_USERCOPY
6621 	&usersize_attr.attr,
6622 #endif
6623 #ifdef CONFIG_KFENCE
6624 	&skip_kfence_attr.attr,
6625 #endif
6626 
6627 	NULL
6628 };
6629 
6630 static const struct attribute_group slab_attr_group = {
6631 	.attrs = slab_attrs,
6632 };
6633 
6634 static ssize_t slab_attr_show(struct kobject *kobj,
6635 				struct attribute *attr,
6636 				char *buf)
6637 {
6638 	struct slab_attribute *attribute;
6639 	struct kmem_cache *s;
6640 
6641 	attribute = to_slab_attr(attr);
6642 	s = to_slab(kobj);
6643 
6644 	if (!attribute->show)
6645 		return -EIO;
6646 
6647 	return attribute->show(s, buf);
6648 }
6649 
6650 static ssize_t slab_attr_store(struct kobject *kobj,
6651 				struct attribute *attr,
6652 				const char *buf, size_t len)
6653 {
6654 	struct slab_attribute *attribute;
6655 	struct kmem_cache *s;
6656 
6657 	attribute = to_slab_attr(attr);
6658 	s = to_slab(kobj);
6659 
6660 	if (!attribute->store)
6661 		return -EIO;
6662 
6663 	return attribute->store(s, buf, len);
6664 }
6665 
6666 static void kmem_cache_release(struct kobject *k)
6667 {
6668 	slab_kmem_cache_release(to_slab(k));
6669 }
6670 
6671 static const struct sysfs_ops slab_sysfs_ops = {
6672 	.show = slab_attr_show,
6673 	.store = slab_attr_store,
6674 };
6675 
6676 static const struct kobj_type slab_ktype = {
6677 	.sysfs_ops = &slab_sysfs_ops,
6678 	.release = kmem_cache_release,
6679 };
6680 
6681 static struct kset *slab_kset;
6682 
6683 static inline struct kset *cache_kset(struct kmem_cache *s)
6684 {
6685 	return slab_kset;
6686 }
6687 
6688 #define ID_STR_LENGTH 32
6689 
6690 /*
6691  * Create a unique string id for a slab cache:
6692  * Format	:[flags-]size
6693  */
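/*
 * For example (hypothetical values): a mergeable cache with SLAB_CACHE_DMA
 * and SLAB_ACCOUNT set and s->size == 192 gets the id ":dA-0000192".
 */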
6694 static char *create_unique_id(struct kmem_cache *s)
6695 {
6696 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
6697 	char *p = name;
6698 
6699 	if (!name)
6700 		return ERR_PTR(-ENOMEM);
6701 
6702 	*p++ = ':';
6703 	/*
6704 	 * First flags affecting slabcache operations. We will only
6705 	 * get here for aliasable slabs so we do not need to support
6706 	 * too many flags. The flags here must cover all flags that
6707 	 * are matched during merging to guarantee that the id is
6708 	 * unique.
6709 	 */
6710 	if (s->flags & SLAB_CACHE_DMA)
6711 		*p++ = 'd';
6712 	if (s->flags & SLAB_CACHE_DMA32)
6713 		*p++ = 'D';
6714 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
6715 		*p++ = 'a';
6716 	if (s->flags & SLAB_CONSISTENCY_CHECKS)
6717 		*p++ = 'F';
6718 	if (s->flags & SLAB_ACCOUNT)
6719 		*p++ = 'A';
6720 	if (p != name + 1)
6721 		*p++ = '-';
6722 	p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
6723 
6724 	if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
6725 		kfree(name);
6726 		return ERR_PTR(-EINVAL);
6727 	}
6728 	kmsan_unpoison_memory(name, p - name);
6729 	return name;
6730 }
6731 
6732 static int sysfs_slab_add(struct kmem_cache *s)
6733 {
6734 	int err;
6735 	const char *name;
6736 	struct kset *kset = cache_kset(s);
6737 	int unmergeable = slab_unmergeable(s);
6738 
6739 	if (!unmergeable && disable_higher_order_debug &&
6740 			(slub_debug & DEBUG_METADATA_FLAGS))
6741 		unmergeable = 1;
6742 
6743 	if (unmergeable) {
6744 		/*
6745 		 * Slabcache can never be merged so we can use the name proper.
6746 		 * This is typically the case for debug situations. In that
6747 		 * case we can catch duplicate names easily.
6748 		 */
6749 		sysfs_remove_link(&slab_kset->kobj, s->name);
6750 		name = s->name;
6751 	} else {
6752 		/*
6753 		 * Create a unique name for the slab as a target
6754 		 * for the symlinks.
6755 		 */
6756 		name = create_unique_id(s);
6757 		if (IS_ERR(name))
6758 			return PTR_ERR(name);
6759 	}
6760 
6761 	s->kobj.kset = kset;
6762 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
6763 	if (err)
6764 		goto out;
6765 
6766 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
6767 	if (err)
6768 		goto out_del_kobj;
6769 
6770 	if (!unmergeable) {
6771 		/* Setup first alias */
6772 		sysfs_slab_alias(s, s->name);
6773 	}
6774 out:
6775 	if (!unmergeable)
6776 		kfree(name);
6777 	return err;
6778 out_del_kobj:
6779 	kobject_del(&s->kobj);
6780 	goto out;
6781 }
6782 
6783 void sysfs_slab_unlink(struct kmem_cache *s)
6784 {
6785 	kobject_del(&s->kobj);
6786 }
6787 
6788 void sysfs_slab_release(struct kmem_cache *s)
6789 {
6790 	kobject_put(&s->kobj);
6791 }
6792 
6793 /*
6794  * Need to buffer aliases during bootup until sysfs becomes
6795  * available lest we lose that information.
6796  */
6797 struct saved_alias {
6798 	struct kmem_cache *s;
6799 	const char *name;
6800 	struct saved_alias *next;
6801 };
6802 
6803 static struct saved_alias *alias_list;
6804 
6805 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
6806 {
6807 	struct saved_alias *al;
6808 
6809 	if (slab_state == FULL) {
6810 		/*
6811 		 * If we have a leftover link then remove it.
6812 		 */
6813 		sysfs_remove_link(&slab_kset->kobj, name);
6814 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
6815 	}
6816 
6817 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
6818 	if (!al)
6819 		return -ENOMEM;
6820 
6821 	al->s = s;
6822 	al->name = name;
6823 	al->next = alias_list;
6824 	alias_list = al;
6825 	kmsan_unpoison_memory(al, sizeof(*al));
6826 	return 0;
6827 }
6828 
6829 static int __init slab_sysfs_init(void)
6830 {
6831 	struct kmem_cache *s;
6832 	int err;
6833 
6834 	mutex_lock(&slab_mutex);
6835 
6836 	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
6837 	if (!slab_kset) {
6838 		mutex_unlock(&slab_mutex);
6839 		pr_err("Cannot register slab subsystem.\n");
6840 		return -ENOMEM;
6841 	}
6842 
6843 	slab_state = FULL;
6844 
6845 	list_for_each_entry(s, &slab_caches, list) {
6846 		err = sysfs_slab_add(s);
6847 		if (err)
6848 			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
6849 			       s->name);
6850 	}
6851 
6852 	while (alias_list) {
6853 		struct saved_alias *al = alias_list;
6854 
6855 		alias_list = alias_list->next;
6856 		err = sysfs_slab_alias(al->s, al->name);
6857 		if (err)
6858 			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
6859 			       al->name);
6860 		kfree(al);
6861 	}
6862 
6863 	mutex_unlock(&slab_mutex);
6864 	return 0;
6865 }
6866 late_initcall(slab_sysfs_init);
6867 #endif /* SLAB_SUPPORTS_SYSFS */
6868 
6869 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
6870 static int slab_debugfs_show(struct seq_file *seq, void *v)
6871 {
6872 	struct loc_track *t = seq->private;
6873 	struct location *l;
6874 	unsigned long idx;
6875 
6876 	idx = (unsigned long) t->idx;
6877 	if (idx < t->count) {
6878 		l = &t->loc[idx];
6879 
6880 		seq_printf(seq, "%7ld ", l->count);
6881 
6882 		if (l->addr)
6883 			seq_printf(seq, "%pS", (void *)l->addr);
6884 		else
6885 			seq_puts(seq, "<not-available>");
6886 
6887 		if (l->waste)
6888 			seq_printf(seq, " waste=%lu/%lu",
6889 				l->count * l->waste, l->waste);
6890 
6891 		if (l->sum_time != l->min_time) {
6892 			seq_printf(seq, " age=%ld/%llu/%ld",
6893 				l->min_time, div_u64(l->sum_time, l->count),
6894 				l->max_time);
6895 		} else
6896 			seq_printf(seq, " age=%ld", l->min_time);
6897 
6898 		if (l->min_pid != l->max_pid)
6899 			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
6900 		else
6901 			seq_printf(seq, " pid=%ld",
6902 				l->min_pid);
6903 
6904 		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
6905 			seq_printf(seq, " cpus=%*pbl",
6906 				 cpumask_pr_args(to_cpumask(l->cpus)));
6907 
6908 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
6909 			seq_printf(seq, " nodes=%*pbl",
6910 				 nodemask_pr_args(&l->nodes));
6911 
6912 #ifdef CONFIG_STACKDEPOT
6913 		{
6914 			depot_stack_handle_t handle;
6915 			unsigned long *entries;
6916 			unsigned int nr_entries, j;
6917 
6918 			handle = READ_ONCE(l->handle);
6919 			if (handle) {
6920 				nr_entries = stack_depot_fetch(handle, &entries);
6921 				seq_puts(seq, "\n");
6922 				for (j = 0; j < nr_entries; j++)
6923 					seq_printf(seq, "        %pS\n", (void *)entries[j]);
6924 			}
6925 		}
6926 #endif
6927 		seq_puts(seq, "\n");
6928 	}
6929 
6930 	if (!idx && !t->count)
6931 		seq_puts(seq, "No data\n");
6932 
6933 	return 0;
6934 }
6935 
6936 static void slab_debugfs_stop(struct seq_file *seq, void *v)
6937 {
6938 }
6939 
6940 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
6941 {
6942 	struct loc_track *t = seq->private;
6943 
6944 	t->idx = ++(*ppos);
6945 	if (*ppos <= t->count)
6946 		return ppos;
6947 
6948 	return NULL;
6949 }
6950 
6951 static int cmp_loc_by_count(const void *a, const void *b, const void *data)
6952 {
6953 	struct location *loc1 = (struct location *)a;
6954 	struct location *loc2 = (struct location *)b;
6955 
6956 	if (loc1->count > loc2->count)
6957 		return -1;
6958 	else
6959 		return 1;
6960 }
6961 
6962 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
6963 {
6964 	struct loc_track *t = seq->private;
6965 
6966 	t->idx = *ppos;
6967 	return ppos;
6968 }
6969 
6970 static const struct seq_operations slab_debugfs_sops = {
6971 	.start  = slab_debugfs_start,
6972 	.next   = slab_debugfs_next,
6973 	.stop   = slab_debugfs_stop,
6974 	.show   = slab_debugfs_show,
6975 };
6976 
6977 static int slab_debug_trace_open(struct inode *inode, struct file *filep)
6978 {
6979 
6980 	struct kmem_cache_node *n;
6981 	enum track_item alloc;
6982 	int node;
6983 	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
6984 						sizeof(struct loc_track));
6985 	struct kmem_cache *s = file_inode(filep)->i_private;
6986 	unsigned long *obj_map;
6987 
6988 	if (!t)
6989 		return -ENOMEM;
6990 
6991 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
6992 	if (!obj_map) {
6993 		seq_release_private(inode, filep);
6994 		return -ENOMEM;
6995 	}
6996 
6997 	if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
6998 		alloc = TRACK_ALLOC;
6999 	else
7000 		alloc = TRACK_FREE;
7001 
7002 	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
7003 		bitmap_free(obj_map);
7004 		seq_release_private(inode, filep);
7005 		return -ENOMEM;
7006 	}
7007 
7008 	for_each_kmem_cache_node(s, node, n) {
7009 		unsigned long flags;
7010 		struct slab *slab;
7011 
7012 		if (!node_nr_slabs(n))
7013 			continue;
7014 
7015 		spin_lock_irqsave(&n->list_lock, flags);
7016 		list_for_each_entry(slab, &n->partial, slab_list)
7017 			process_slab(t, s, slab, alloc, obj_map);
7018 		list_for_each_entry(slab, &n->full, slab_list)
7019 			process_slab(t, s, slab, alloc, obj_map);
7020 		spin_unlock_irqrestore(&n->list_lock, flags);
7021 	}
7022 
7023 	/* Sort locations by count */
7024 	sort_r(t->loc, t->count, sizeof(struct location),
7025 		cmp_loc_by_count, NULL, NULL);
7026 
7027 	bitmap_free(obj_map);
7028 	return 0;
7029 }
7030 
7031 static int slab_debug_trace_release(struct inode *inode, struct file *file)
7032 {
7033 	struct seq_file *seq = file->private_data;
7034 	struct loc_track *t = seq->private;
7035 
7036 	free_loc_track(t);
7037 	return seq_release_private(inode, file);
7038 }
7039 
7040 static const struct file_operations slab_debugfs_fops = {
7041 	.open    = slab_debug_trace_open,
7042 	.read    = seq_read,
7043 	.llseek  = seq_lseek,
7044 	.release = slab_debug_trace_release,
7045 };
7046 
7047 static void debugfs_slab_add(struct kmem_cache *s)
7048 {
7049 	struct dentry *slab_cache_dir;
7050 
7051 	if (unlikely(!slab_debugfs_root))
7052 		return;
7053 
7054 	slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
7055 
7056 	debugfs_create_file("alloc_traces", 0400,
7057 		slab_cache_dir, s, &slab_debugfs_fops);
7058 
7059 	debugfs_create_file("free_traces", 0400,
7060 		slab_cache_dir, s, &slab_debugfs_fops);
7061 }
7062 
7063 void debugfs_slab_release(struct kmem_cache *s)
7064 {
7065 	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
7066 }
7067 
7068 static int __init slab_debugfs_init(void)
7069 {
7070 	struct kmem_cache *s;
7071 
7072 	slab_debugfs_root = debugfs_create_dir("slab", NULL);
7073 
7074 	list_for_each_entry(s, &slab_caches, list)
7075 		if (s->flags & SLAB_STORE_USER)
7076 			debugfs_slab_add(s);
7077 
7078 	return 0;
7079 
7080 }
7081 __initcall(slab_debugfs_init);
7082 #endif
7083 /*
7084  * The /proc/slabinfo ABI
7085  */
7086 #ifdef CONFIG_SLUB_DEBUG
7087 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
7088 {
7089 	unsigned long nr_slabs = 0;
7090 	unsigned long nr_objs = 0;
7091 	unsigned long nr_free = 0;
7092 	int node;
7093 	struct kmem_cache_node *n;
7094 
7095 	for_each_kmem_cache_node(s, node, n) {
7096 		nr_slabs += node_nr_slabs(n);
7097 		nr_objs += node_nr_objs(n);
7098 		nr_free += count_partial(n, count_free);
7099 	}
7100 
7101 	sinfo->active_objs = nr_objs - nr_free;
7102 	sinfo->num_objs = nr_objs;
7103 	sinfo->active_slabs = nr_slabs;
7104 	sinfo->num_slabs = nr_slabs;
7105 	sinfo->objects_per_slab = oo_objects(s->oo);
7106 	sinfo->cache_order = oo_order(s->oo);
7107 }
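
/*
 * A sketch of how these numbers surface: the generic slabinfo code (in
 * mm/slab_common.c) emits one /proc/slabinfo line per cache, and its
 * active_objs, num_objs, objperslab and pagesperslab columns come from the
 * fields filled in above.
 */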
7108 
7109 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
7110 {
7111 }
7112 
7113 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
7114 		       size_t count, loff_t *ppos)
7115 {
7116 	return -EIO;
7117 }
7118 #endif /* CONFIG_SLUB_DEBUG */
7119