xref: /linux/mm/slub.c (revision 67f2df3b82d091ed095d0e47e1f3a9d3e18e4e41)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SLUB: A slab allocator that limits cache line use instead of queuing
4  * objects in per cpu and per node lists.
5  *
6  * The allocator synchronizes using per slab locks or atomic operations
7  * and only uses a centralized lock to manage a pool of partial slabs.
8  *
9  * (C) 2007 SGI, Christoph Lameter
10  * (C) 2011 Linux Foundation, Christoph Lameter
11  */
12 
13 #include <linux/mm.h>
14 #include <linux/swap.h> /* mm_account_reclaimed_pages() */
15 #include <linux/module.h>
16 #include <linux/bit_spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/swab.h>
19 #include <linux/bitops.h>
20 #include <linux/slab.h>
21 #include "slab.h"
22 #include <linux/proc_fs.h>
23 #include <linux/seq_file.h>
24 #include <linux/kasan.h>
25 #include <linux/kmsan.h>
26 #include <linux/cpu.h>
27 #include <linux/cpuset.h>
28 #include <linux/mempolicy.h>
29 #include <linux/ctype.h>
30 #include <linux/stackdepot.h>
31 #include <linux/debugobjects.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kfence.h>
34 #include <linux/memory.h>
35 #include <linux/math64.h>
36 #include <linux/fault-inject.h>
37 #include <linux/kmemleak.h>
38 #include <linux/stacktrace.h>
39 #include <linux/prefetch.h>
40 #include <linux/memcontrol.h>
41 #include <linux/random.h>
42 #include <kunit/test.h>
43 #include <kunit/test-bug.h>
44 #include <linux/sort.h>
45 
46 #include <linux/debugfs.h>
47 #include <trace/events/kmem.h>
48 
49 #include "internal.h"
50 
51 /*
52  * Lock order:
53  *   1. slab_mutex (Global Mutex)
54  *   2. node->list_lock (Spinlock)
55  *   3. kmem_cache->cpu_slab->lock (Local lock)
56  *   4. slab_lock(slab) (Only on some arches)
57  *   5. object_map_lock (Only for debugging)
58  *
59  *   slab_mutex
60  *
61  *   The role of the slab_mutex is to protect the list of all the slabs
62  *   and to synchronize major metadata changes to slab cache structures.
63  *   Also synchronizes memory hotplug callbacks.
64  *
65  *   slab_lock
66  *
67  *   The slab_lock is a wrapper around the page lock, thus it is a bit
68  *   spinlock.
69  *
70  *   The slab_lock is only used on arches that do not have the ability
71  *   to do a cmpxchg_double. It only protects:
72  *
73  *	A. slab->freelist	-> List of free objects in a slab
74  *	B. slab->inuse		-> Number of objects in use
75  *	C. slab->objects	-> Number of objects in slab
76  *	D. slab->frozen		-> frozen state
77  *
78  *   Frozen slabs
79  *
80  *   If a slab is frozen then it is exempt from list management. It is
81  *   the cpu slab which is actively allocated from by the processor that
82  *   froze it and it is not on any list. The processor that froze the
83  *   slab is the one who can perform list operations on the slab. Other
84  *   processors may put objects onto the freelist but the processor that
85  *   froze the slab is the only one that can retrieve the objects from the
86  *   slab's freelist.
87  *
88  *   CPU partial slabs
89  *
90  *   The partially empty slabs cached on the CPU partial list are used
91  *   for performance reasons, which speeds up the allocation process.
92  *   These slabs are not frozen, but are also exempt from list management,
93  *   by clearing the PG_workingset flag when moving out of the node
94  *   partial list. Please see __slab_free() for more details.
95  *
96  *   To sum up, the current scheme is:
97  *   - node partial slab: PG_Workingset && !frozen
98  *   - cpu partial slab: !PG_Workingset && !frozen
99  *   - cpu slab: !PG_Workingset && frozen
100  *   - full slab: !PG_Workingset && !frozen
101  *
102  *   list_lock
103  *
104  *   The list_lock protects the partial and full list on each node and
105  *   the partial slab counter. If taken then no new slabs may be added or
106  *   removed from the lists nor make the number of partial slabs be modified.
107  *   (Note that the total number of slabs is an atomic value that may be
108  *   modified without taking the list lock).
109  *
110  *   The list_lock is a centralized lock and thus we avoid taking it as
111  *   much as possible. As long as SLUB does not have to handle partial
112  *   slabs, operations can continue without any centralized lock. F.e.
113  *   allocating a long series of objects that fill up slabs does not require
114  *   the list lock.
115  *
116  *   For debug caches, all allocations are forced to go through a list_lock
117  *   protected region to serialize against concurrent validation.
118  *
119  *   cpu_slab->lock local lock
120  *
121  *   This locks protect slowpath manipulation of all kmem_cache_cpu fields
122  *   except the stat counters. This is a percpu structure manipulated only by
123  *   the local cpu, so the lock protects against being preempted or interrupted
124  *   by an irq. Fast path operations rely on lockless operations instead.
125  *
126  *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption
127  *   which means the lockless fastpath cannot be used as it might interfere with
128  *   an in-progress slow path operations. In this case the local lock is always
129  *   taken but it still utilizes the freelist for the common operations.
130  *
131  *   lockless fastpaths
132  *
133  *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
134  *   are fully lockless when satisfied from the percpu slab (and when
135  *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
136  *   They also don't disable preemption or migration or irqs. They rely on
137  *   the transaction id (tid) field to detect being preempted or moved to
138  *   another cpu.
139  *
140  *   irq, preemption, migration considerations
141  *
142  *   Interrupts are disabled as part of list_lock or local_lock operations, or
143  *   around the slab_lock operation, in order to make the slab allocator safe
144  *   to use in the context of an irq.
145  *
146  *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
147  *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
148  *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
149  *   doesn't have to be revalidated in each section protected by the local lock.
150  *
151  * SLUB assigns one slab for allocation to each processor.
152  * Allocations only occur from these slabs called cpu slabs.
153  *
154  * Slabs with free elements are kept on a partial list and during regular
155  * operations no list for full slabs is used. If an object in a full slab is
156  * freed then the slab will show up again on the partial lists.
157  * We track full slabs for debugging purposes though because otherwise we
158  * cannot scan all objects.
159  *
160  * Slabs are freed when they become empty. Teardown and setup is
161  * minimal so we rely on the page allocators per cpu caches for
162  * fast frees and allocs.
163  *
164  * slab->frozen		The slab is frozen and exempt from list processing.
165  * 			This means that the slab is dedicated to a purpose
166  * 			such as satisfying allocations for a specific
167  * 			processor. Objects may be freed in the slab while
168  * 			it is frozen but slab_free will then skip the usual
169  * 			list operations. It is up to the processor holding
170  * 			the slab to integrate the slab into the slab lists
171  * 			when the slab is no longer needed.
172  *
173  * 			One use of this flag is to mark slabs that are
174  * 			used for allocations. Then such a slab becomes a cpu
175  * 			slab. The cpu slab may be equipped with an additional
176  * 			freelist that allows lockless access to
177  * 			free objects in addition to the regular freelist
178  * 			that requires the slab lock.
179  *
180  * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
181  * 			options set. This moves	slab handling out of
182  * 			the fast path and disables lockless freelists.
183  */
184 
185 /*
186  * We could simply use migrate_disable()/enable() but as long as it's a
187  * function call even on !PREEMPT_RT, use inline preempt_disable() there.
188  */
189 #ifndef CONFIG_PREEMPT_RT
190 #define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
191 #define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
192 #define USE_LOCKLESS_FAST_PATH()	(true)
193 #else
194 #define slub_get_cpu_ptr(var)		\
195 ({					\
196 	migrate_disable();		\
197 	this_cpu_ptr(var);		\
198 })
199 #define slub_put_cpu_ptr(var)		\
200 do {					\
201 	(void)(var);			\
202 	migrate_enable();		\
203 } while (0)
204 #define USE_LOCKLESS_FAST_PATH()	(false)
205 #endif
206 
207 #ifndef CONFIG_SLUB_TINY
208 #define __fastpath_inline __always_inline
209 #else
210 #define __fastpath_inline
211 #endif
212 
213 #ifdef CONFIG_SLUB_DEBUG
214 #ifdef CONFIG_SLUB_DEBUG_ON
215 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
216 #else
217 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
218 #endif
219 #endif		/* CONFIG_SLUB_DEBUG */
220 
221 /* Structure holding parameters for get_partial() call chain */
222 struct partial_context {
223 	gfp_t flags;
224 	unsigned int orig_size;
225 	void *object;
226 };
227 
228 static inline bool kmem_cache_debug(struct kmem_cache *s)
229 {
230 	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
231 }
232 
233 static inline bool slub_debug_orig_size(struct kmem_cache *s)
234 {
235 	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
236 			(s->flags & SLAB_KMALLOC));
237 }
238 
239 void *fixup_red_left(struct kmem_cache *s, void *p)
240 {
241 	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
242 		p += s->red_left_pad;
243 
244 	return p;
245 }
246 
247 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
248 {
249 #ifdef CONFIG_SLUB_CPU_PARTIAL
250 	return !kmem_cache_debug(s);
251 #else
252 	return false;
253 #endif
254 }
255 
256 /*
257  * Issues still to be resolved:
258  *
259  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
260  *
261  * - Variable sizing of the per node arrays
262  */
263 
264 /* Enable to log cmpxchg failures */
265 #undef SLUB_DEBUG_CMPXCHG
266 
267 #ifndef CONFIG_SLUB_TINY
268 /*
269  * Minimum number of partial slabs. These will be left on the partial
270  * lists even if they are empty. kmem_cache_shrink may reclaim them.
271  */
272 #define MIN_PARTIAL 5
273 
274 /*
275  * Maximum number of desirable partial slabs.
276  * The existence of more partial slabs makes kmem_cache_shrink
277  * sort the partial list by the number of objects in use.
278  */
279 #define MAX_PARTIAL 10
280 #else
281 #define MIN_PARTIAL 0
282 #define MAX_PARTIAL 0
283 #endif
284 
285 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
286 				SLAB_POISON | SLAB_STORE_USER)
287 
288 /*
289  * These debug flags cannot use CMPXCHG because there might be consistency
290  * issues when checking or reading debug information
291  */
292 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
293 				SLAB_TRACE)
294 
295 
296 /*
297  * Debugging flags that require metadata to be stored in the slab.  These get
298  * disabled when slab_debug=O is used and a cache's min order increases with
299  * metadata.
300  */
301 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
302 
303 #define OO_SHIFT	16
304 #define OO_MASK		((1 << OO_SHIFT) - 1)
305 #define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */
306 
307 /* Internal SLUB flags */
308 /* Poison object */
309 #define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
310 /* Use cmpxchg_double */
311 
312 #ifdef system_has_freelist_aba
313 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
314 #else
315 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
316 #endif
317 
318 /*
319  * Tracking user of a slab.
320  */
321 #define TRACK_ADDRS_COUNT 16
322 struct track {
323 	unsigned long addr;	/* Called from address */
324 #ifdef CONFIG_STACKDEPOT
325 	depot_stack_handle_t handle;
326 #endif
327 	int cpu;		/* Was running on cpu */
328 	int pid;		/* Pid context */
329 	unsigned long when;	/* When did the operation occur */
330 };
331 
332 enum track_item { TRACK_ALLOC, TRACK_FREE };
333 
334 #ifdef SLAB_SUPPORTS_SYSFS
335 static int sysfs_slab_add(struct kmem_cache *);
336 static int sysfs_slab_alias(struct kmem_cache *, const char *);
337 #else
338 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
339 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
340 							{ return 0; }
341 #endif
342 
343 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
344 static void debugfs_slab_add(struct kmem_cache *);
345 #else
346 static inline void debugfs_slab_add(struct kmem_cache *s) { }
347 #endif
348 
349 enum stat_item {
350 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
351 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
352 	FREE_FASTPATH,		/* Free to cpu slab */
353 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
354 	FREE_FROZEN,		/* Freeing to frozen slab */
355 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
356 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
357 	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
358 	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
359 	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
360 	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
361 	FREE_SLAB,		/* Slab freed to the page allocator */
362 	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
363 	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
364 	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
365 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
366 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
367 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
368 	DEACTIVATE_BYPASS,	/* Implicit deactivation */
369 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
370 	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
371 	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
372 	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
373 	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
374 	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
375 	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
376 	NR_SLUB_STAT_ITEMS
377 };
378 
379 #ifndef CONFIG_SLUB_TINY
380 /*
381  * When changing the layout, make sure freelist and tid are still compatible
382  * with this_cpu_cmpxchg_double() alignment requirements.
383  */
384 struct kmem_cache_cpu {
385 	union {
386 		struct {
387 			void **freelist;	/* Pointer to next available object */
388 			unsigned long tid;	/* Globally unique transaction id */
389 		};
390 		freelist_aba_t freelist_tid;
391 	};
392 	struct slab *slab;	/* The slab from which we are allocating */
393 #ifdef CONFIG_SLUB_CPU_PARTIAL
394 	struct slab *partial;	/* Partially allocated slabs */
395 #endif
396 	local_lock_t lock;	/* Protects the fields above */
397 #ifdef CONFIG_SLUB_STATS
398 	unsigned int stat[NR_SLUB_STAT_ITEMS];
399 #endif
400 };
401 #endif /* CONFIG_SLUB_TINY */
402 
403 static inline void stat(const struct kmem_cache *s, enum stat_item si)
404 {
405 #ifdef CONFIG_SLUB_STATS
406 	/*
407 	 * The rmw is racy on a preemptible kernel but this is acceptable, so
408 	 * avoid this_cpu_add()'s irq-disable overhead.
409 	 */
410 	raw_cpu_inc(s->cpu_slab->stat[si]);
411 #endif
412 }
413 
414 static inline
415 void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
416 {
417 #ifdef CONFIG_SLUB_STATS
418 	raw_cpu_add(s->cpu_slab->stat[si], v);
419 #endif
420 }
421 
422 /*
423  * The slab lists for all objects.
424  */
425 struct kmem_cache_node {
426 	spinlock_t list_lock;
427 	unsigned long nr_partial;
428 	struct list_head partial;
429 #ifdef CONFIG_SLUB_DEBUG
430 	atomic_long_t nr_slabs;
431 	atomic_long_t total_objects;
432 	struct list_head full;
433 #endif
434 };
435 
436 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
437 {
438 	return s->node[node];
439 }
440 
441 /*
442  * Iterator over all nodes. The body will be executed for each node that has
443  * a kmem_cache_node structure allocated (which is true for all online nodes)
444  */
445 #define for_each_kmem_cache_node(__s, __node, __n) \
446 	for (__node = 0; __node < nr_node_ids; __node++) \
447 		 if ((__n = get_node(__s, __node)))
448 
449 /*
450  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
451  * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
452  * differ during memory hotplug/hotremove operations.
453  * Protected by slab_mutex.
454  */
455 static nodemask_t slab_nodes;
456 
457 #ifndef CONFIG_SLUB_TINY
458 /*
459  * Workqueue used for flush_cpu_slab().
460  */
461 static struct workqueue_struct *flushwq;
462 #endif
463 
464 /********************************************************************
465  * 			Core slab cache functions
466  *******************************************************************/
467 
468 /*
469  * freeptr_t represents a SLUB freelist pointer, which might be encoded
470  * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
471  */
472 typedef struct { unsigned long v; } freeptr_t;
473 
474 /*
475  * Returns freelist pointer (ptr). With hardening, this is obfuscated
476  * with an XOR of the address where the pointer is held and a per-cache
477  * random number.
478  */
479 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
480 					    void *ptr, unsigned long ptr_addr)
481 {
482 	unsigned long encoded;
483 
484 #ifdef CONFIG_SLAB_FREELIST_HARDENED
485 	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
486 #else
487 	encoded = (unsigned long)ptr;
488 #endif
489 	return (freeptr_t){.v = encoded};
490 }
491 
492 static inline void *freelist_ptr_decode(const struct kmem_cache *s,
493 					freeptr_t ptr, unsigned long ptr_addr)
494 {
495 	void *decoded;
496 
497 #ifdef CONFIG_SLAB_FREELIST_HARDENED
498 	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
499 #else
500 	decoded = (void *)ptr.v;
501 #endif
502 	return decoded;
503 }
504 
505 static inline void *get_freepointer(struct kmem_cache *s, void *object)
506 {
507 	unsigned long ptr_addr;
508 	freeptr_t p;
509 
510 	object = kasan_reset_tag(object);
511 	ptr_addr = (unsigned long)object + s->offset;
512 	p = *(freeptr_t *)(ptr_addr);
513 	return freelist_ptr_decode(s, p, ptr_addr);
514 }
515 
516 #ifndef CONFIG_SLUB_TINY
517 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
518 {
519 	prefetchw(object + s->offset);
520 }
521 #endif
522 
523 /*
524  * When running under KMSAN, get_freepointer_safe() may return an uninitialized
525  * pointer value in the case the current thread loses the race for the next
526  * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
527  * slab_alloc_node() will fail, so the uninitialized value won't be used, but
528  * KMSAN will still check all arguments of cmpxchg because of imperfect
529  * handling of inline assembly.
530  * To work around this problem, we apply __no_kmsan_checks to ensure that
531  * get_freepointer_safe() returns initialized memory.
532  */
533 __no_kmsan_checks
534 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
535 {
536 	unsigned long freepointer_addr;
537 	freeptr_t p;
538 
539 	if (!debug_pagealloc_enabled_static())
540 		return get_freepointer(s, object);
541 
542 	object = kasan_reset_tag(object);
543 	freepointer_addr = (unsigned long)object + s->offset;
544 	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
545 	return freelist_ptr_decode(s, p, freepointer_addr);
546 }
547 
548 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
549 {
550 	unsigned long freeptr_addr = (unsigned long)object + s->offset;
551 
552 #ifdef CONFIG_SLAB_FREELIST_HARDENED
553 	BUG_ON(object == fp); /* naive detection of double free or corruption */
554 #endif
555 
556 	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
557 	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
558 }
559 
560 /*
561  * See comment in calculate_sizes().
562  */
563 static inline bool freeptr_outside_object(struct kmem_cache *s)
564 {
565 	return s->offset >= s->inuse;
566 }
567 
568 /*
569  * Return offset of the end of info block which is inuse + free pointer if
570  * not overlapping with object.
571  */
572 static inline unsigned int get_info_end(struct kmem_cache *s)
573 {
574 	if (freeptr_outside_object(s))
575 		return s->inuse + sizeof(void *);
576 	else
577 		return s->inuse;
578 }
579 
580 /* Loop over all objects in a slab */
581 #define for_each_object(__p, __s, __addr, __objects) \
582 	for (__p = fixup_red_left(__s, __addr); \
583 		__p < (__addr) + (__objects) * (__s)->size; \
584 		__p += (__s)->size)
585 
586 static inline unsigned int order_objects(unsigned int order, unsigned int size)
587 {
588 	return ((unsigned int)PAGE_SIZE << order) / size;
589 }
590 
591 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
592 		unsigned int size)
593 {
594 	struct kmem_cache_order_objects x = {
595 		(order << OO_SHIFT) + order_objects(order, size)
596 	};
597 
598 	return x;
599 }
600 
601 static inline unsigned int oo_order(struct kmem_cache_order_objects x)
602 {
603 	return x.x >> OO_SHIFT;
604 }
605 
606 static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
607 {
608 	return x.x & OO_MASK;
609 }
610 
611 #ifdef CONFIG_SLUB_CPU_PARTIAL
612 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
613 {
614 	unsigned int nr_slabs;
615 
616 	s->cpu_partial = nr_objects;
617 
618 	/*
619 	 * We take the number of objects but actually limit the number of
620 	 * slabs on the per cpu partial list, in order to limit excessive
621 	 * growth of the list. For simplicity we assume that the slabs will
622 	 * be half-full.
623 	 */
624 	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
625 	s->cpu_partial_slabs = nr_slabs;
626 }
627 
628 static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
629 {
630 	return s->cpu_partial_slabs;
631 }
632 #else
633 static inline void
634 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
635 {
636 }
637 
638 static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
639 {
640 	return 0;
641 }
642 #endif /* CONFIG_SLUB_CPU_PARTIAL */
643 
644 /*
645  * Per slab locking using the pagelock
646  */
647 static __always_inline void slab_lock(struct slab *slab)
648 {
649 	bit_spin_lock(PG_locked, &slab->__page_flags);
650 }
651 
652 static __always_inline void slab_unlock(struct slab *slab)
653 {
654 	bit_spin_unlock(PG_locked, &slab->__page_flags);
655 }
656 
657 static inline bool
658 __update_freelist_fast(struct slab *slab,
659 		      void *freelist_old, unsigned long counters_old,
660 		      void *freelist_new, unsigned long counters_new)
661 {
662 #ifdef system_has_freelist_aba
663 	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
664 	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
665 
666 	return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
667 #else
668 	return false;
669 #endif
670 }
671 
672 static inline bool
673 __update_freelist_slow(struct slab *slab,
674 		      void *freelist_old, unsigned long counters_old,
675 		      void *freelist_new, unsigned long counters_new)
676 {
677 	bool ret = false;
678 
679 	slab_lock(slab);
680 	if (slab->freelist == freelist_old &&
681 	    slab->counters == counters_old) {
682 		slab->freelist = freelist_new;
683 		slab->counters = counters_new;
684 		ret = true;
685 	}
686 	slab_unlock(slab);
687 
688 	return ret;
689 }
690 
691 /*
692  * Interrupts must be disabled (for the fallback code to work right), typically
693  * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
694  * part of bit_spin_lock(), is sufficient because the policy is not to allow any
695  * allocation/ free operation in hardirq context. Therefore nothing can
696  * interrupt the operation.
697  */
698 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
699 		void *freelist_old, unsigned long counters_old,
700 		void *freelist_new, unsigned long counters_new,
701 		const char *n)
702 {
703 	bool ret;
704 
705 	if (USE_LOCKLESS_FAST_PATH())
706 		lockdep_assert_irqs_disabled();
707 
708 	if (s->flags & __CMPXCHG_DOUBLE) {
709 		ret = __update_freelist_fast(slab, freelist_old, counters_old,
710 				            freelist_new, counters_new);
711 	} else {
712 		ret = __update_freelist_slow(slab, freelist_old, counters_old,
713 				            freelist_new, counters_new);
714 	}
715 	if (likely(ret))
716 		return true;
717 
718 	cpu_relax();
719 	stat(s, CMPXCHG_DOUBLE_FAIL);
720 
721 #ifdef SLUB_DEBUG_CMPXCHG
722 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
723 #endif
724 
725 	return false;
726 }
727 
728 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
729 		void *freelist_old, unsigned long counters_old,
730 		void *freelist_new, unsigned long counters_new,
731 		const char *n)
732 {
733 	bool ret;
734 
735 	if (s->flags & __CMPXCHG_DOUBLE) {
736 		ret = __update_freelist_fast(slab, freelist_old, counters_old,
737 				            freelist_new, counters_new);
738 	} else {
739 		unsigned long flags;
740 
741 		local_irq_save(flags);
742 		ret = __update_freelist_slow(slab, freelist_old, counters_old,
743 				            freelist_new, counters_new);
744 		local_irq_restore(flags);
745 	}
746 	if (likely(ret))
747 		return true;
748 
749 	cpu_relax();
750 	stat(s, CMPXCHG_DOUBLE_FAIL);
751 
752 #ifdef SLUB_DEBUG_CMPXCHG
753 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
754 #endif
755 
756 	return false;
757 }
758 
759 #ifdef CONFIG_SLUB_DEBUG
760 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
761 static DEFINE_SPINLOCK(object_map_lock);
762 
763 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
764 		       struct slab *slab)
765 {
766 	void *addr = slab_address(slab);
767 	void *p;
768 
769 	bitmap_zero(obj_map, slab->objects);
770 
771 	for (p = slab->freelist; p; p = get_freepointer(s, p))
772 		set_bit(__obj_to_index(s, addr, p), obj_map);
773 }
774 
775 #if IS_ENABLED(CONFIG_KUNIT)
776 static bool slab_add_kunit_errors(void)
777 {
778 	struct kunit_resource *resource;
779 
780 	if (!kunit_get_current_test())
781 		return false;
782 
783 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
784 	if (!resource)
785 		return false;
786 
787 	(*(int *)resource->data)++;
788 	kunit_put_resource(resource);
789 	return true;
790 }
791 
792 static bool slab_in_kunit_test(void)
793 {
794 	struct kunit_resource *resource;
795 
796 	if (!kunit_get_current_test())
797 		return false;
798 
799 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
800 	if (!resource)
801 		return false;
802 
803 	kunit_put_resource(resource);
804 	return true;
805 }
806 #else
807 static inline bool slab_add_kunit_errors(void) { return false; }
808 static inline bool slab_in_kunit_test(void) { return false; }
809 #endif
810 
811 static inline unsigned int size_from_object(struct kmem_cache *s)
812 {
813 	if (s->flags & SLAB_RED_ZONE)
814 		return s->size - s->red_left_pad;
815 
816 	return s->size;
817 }
818 
819 static inline void *restore_red_left(struct kmem_cache *s, void *p)
820 {
821 	if (s->flags & SLAB_RED_ZONE)
822 		p -= s->red_left_pad;
823 
824 	return p;
825 }
826 
827 /*
828  * Debug settings:
829  */
830 #if defined(CONFIG_SLUB_DEBUG_ON)
831 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
832 #else
833 static slab_flags_t slub_debug;
834 #endif
835 
836 static char *slub_debug_string;
837 static int disable_higher_order_debug;
838 
839 /*
840  * slub is about to manipulate internal object metadata.  This memory lies
841  * outside the range of the allocated object, so accessing it would normally
842  * be reported by kasan as a bounds error.  metadata_access_enable() is used
843  * to tell kasan that these accesses are OK.
844  */
845 static inline void metadata_access_enable(void)
846 {
847 	kasan_disable_current();
848 }
849 
850 static inline void metadata_access_disable(void)
851 {
852 	kasan_enable_current();
853 }
854 
855 /*
856  * Object debugging
857  */
858 
859 /* Verify that a pointer has an address that is valid within a slab page */
860 static inline int check_valid_pointer(struct kmem_cache *s,
861 				struct slab *slab, void *object)
862 {
863 	void *base;
864 
865 	if (!object)
866 		return 1;
867 
868 	base = slab_address(slab);
869 	object = kasan_reset_tag(object);
870 	object = restore_red_left(s, object);
871 	if (object < base || object >= base + slab->objects * s->size ||
872 		(object - base) % s->size) {
873 		return 0;
874 	}
875 
876 	return 1;
877 }
878 
879 static void print_section(char *level, char *text, u8 *addr,
880 			  unsigned int length)
881 {
882 	metadata_access_enable();
883 	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
884 			16, 1, kasan_reset_tag((void *)addr), length, 1);
885 	metadata_access_disable();
886 }
887 
888 static struct track *get_track(struct kmem_cache *s, void *object,
889 	enum track_item alloc)
890 {
891 	struct track *p;
892 
893 	p = object + get_info_end(s);
894 
895 	return kasan_reset_tag(p + alloc);
896 }
897 
898 #ifdef CONFIG_STACKDEPOT
899 static noinline depot_stack_handle_t set_track_prepare(void)
900 {
901 	depot_stack_handle_t handle;
902 	unsigned long entries[TRACK_ADDRS_COUNT];
903 	unsigned int nr_entries;
904 
905 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
906 	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
907 
908 	return handle;
909 }
910 #else
911 static inline depot_stack_handle_t set_track_prepare(void)
912 {
913 	return 0;
914 }
915 #endif
916 
917 static void set_track_update(struct kmem_cache *s, void *object,
918 			     enum track_item alloc, unsigned long addr,
919 			     depot_stack_handle_t handle)
920 {
921 	struct track *p = get_track(s, object, alloc);
922 
923 #ifdef CONFIG_STACKDEPOT
924 	p->handle = handle;
925 #endif
926 	p->addr = addr;
927 	p->cpu = smp_processor_id();
928 	p->pid = current->pid;
929 	p->when = jiffies;
930 }
931 
932 static __always_inline void set_track(struct kmem_cache *s, void *object,
933 				      enum track_item alloc, unsigned long addr)
934 {
935 	depot_stack_handle_t handle = set_track_prepare();
936 
937 	set_track_update(s, object, alloc, addr, handle);
938 }
939 
940 static void init_tracking(struct kmem_cache *s, void *object)
941 {
942 	struct track *p;
943 
944 	if (!(s->flags & SLAB_STORE_USER))
945 		return;
946 
947 	p = get_track(s, object, TRACK_ALLOC);
948 	memset(p, 0, 2*sizeof(struct track));
949 }
950 
951 static void print_track(const char *s, struct track *t, unsigned long pr_time)
952 {
953 	depot_stack_handle_t handle __maybe_unused;
954 
955 	if (!t->addr)
956 		return;
957 
958 	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
959 	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
960 #ifdef CONFIG_STACKDEPOT
961 	handle = READ_ONCE(t->handle);
962 	if (handle)
963 		stack_depot_print(handle);
964 	else
965 		pr_err("object allocation/free stack trace missing\n");
966 #endif
967 }
968 
969 void print_tracking(struct kmem_cache *s, void *object)
970 {
971 	unsigned long pr_time = jiffies;
972 	if (!(s->flags & SLAB_STORE_USER))
973 		return;
974 
975 	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
976 	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
977 }
978 
979 static void print_slab_info(const struct slab *slab)
980 {
981 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
982 	       slab, slab->objects, slab->inuse, slab->freelist,
983 	       &slab->__page_flags);
984 }
985 
986 /*
987  * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
988  * family will round up the real request size to these fixed ones, so
989  * there could be an extra area than what is requested. Save the original
990  * request size in the meta data area, for better debug and sanity check.
991  */
992 static inline void set_orig_size(struct kmem_cache *s,
993 				void *object, unsigned int orig_size)
994 {
995 	void *p = kasan_reset_tag(object);
996 	unsigned int kasan_meta_size;
997 
998 	if (!slub_debug_orig_size(s))
999 		return;
1000 
1001 	/*
1002 	 * KASAN can save its free meta data inside of the object at offset 0.
1003 	 * If this meta data size is larger than 'orig_size', it will overlap
1004 	 * the data redzone in [orig_size+1, object_size]. Thus, we adjust
1005 	 * 'orig_size' to be as at least as big as KASAN's meta data.
1006 	 */
1007 	kasan_meta_size = kasan_metadata_size(s, true);
1008 	if (kasan_meta_size > orig_size)
1009 		orig_size = kasan_meta_size;
1010 
1011 	p += get_info_end(s);
1012 	p += sizeof(struct track) * 2;
1013 
1014 	*(unsigned int *)p = orig_size;
1015 }
1016 
1017 static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
1018 {
1019 	void *p = kasan_reset_tag(object);
1020 
1021 	if (!slub_debug_orig_size(s))
1022 		return s->object_size;
1023 
1024 	p += get_info_end(s);
1025 	p += sizeof(struct track) * 2;
1026 
1027 	return *(unsigned int *)p;
1028 }
1029 
1030 void skip_orig_size_check(struct kmem_cache *s, const void *object)
1031 {
1032 	set_orig_size(s, (void *)object, s->object_size);
1033 }
1034 
1035 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
1036 {
1037 	struct va_format vaf;
1038 	va_list args;
1039 
1040 	va_start(args, fmt);
1041 	vaf.fmt = fmt;
1042 	vaf.va = &args;
1043 	pr_err("=============================================================================\n");
1044 	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
1045 	pr_err("-----------------------------------------------------------------------------\n\n");
1046 	va_end(args);
1047 }
1048 
1049 __printf(2, 3)
1050 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
1051 {
1052 	struct va_format vaf;
1053 	va_list args;
1054 
1055 	if (slab_add_kunit_errors())
1056 		return;
1057 
1058 	va_start(args, fmt);
1059 	vaf.fmt = fmt;
1060 	vaf.va = &args;
1061 	pr_err("FIX %s: %pV\n", s->name, &vaf);
1062 	va_end(args);
1063 }
1064 
1065 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
1066 {
1067 	unsigned int off;	/* Offset of last byte */
1068 	u8 *addr = slab_address(slab);
1069 
1070 	print_tracking(s, p);
1071 
1072 	print_slab_info(slab);
1073 
1074 	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
1075 	       p, p - addr, get_freepointer(s, p));
1076 
1077 	if (s->flags & SLAB_RED_ZONE)
1078 		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
1079 			      s->red_left_pad);
1080 	else if (p > addr + 16)
1081 		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
1082 
1083 	print_section(KERN_ERR,         "Object   ", p,
1084 		      min_t(unsigned int, s->object_size, PAGE_SIZE));
1085 	if (s->flags & SLAB_RED_ZONE)
1086 		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
1087 			s->inuse - s->object_size);
1088 
1089 	off = get_info_end(s);
1090 
1091 	if (s->flags & SLAB_STORE_USER)
1092 		off += 2 * sizeof(struct track);
1093 
1094 	if (slub_debug_orig_size(s))
1095 		off += sizeof(unsigned int);
1096 
1097 	off += kasan_metadata_size(s, false);
1098 
1099 	if (off != size_from_object(s))
1100 		/* Beginning of the filler is the free pointer */
1101 		print_section(KERN_ERR, "Padding  ", p + off,
1102 			      size_from_object(s) - off);
1103 
1104 	dump_stack();
1105 }
1106 
1107 static void object_err(struct kmem_cache *s, struct slab *slab,
1108 			u8 *object, char *reason)
1109 {
1110 	if (slab_add_kunit_errors())
1111 		return;
1112 
1113 	slab_bug(s, "%s", reason);
1114 	print_trailer(s, slab, object);
1115 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1116 }
1117 
1118 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1119 			       void **freelist, void *nextfree)
1120 {
1121 	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
1122 	    !check_valid_pointer(s, slab, nextfree) && freelist) {
1123 		object_err(s, slab, *freelist, "Freechain corrupt");
1124 		*freelist = NULL;
1125 		slab_fix(s, "Isolate corrupted freechain");
1126 		return true;
1127 	}
1128 
1129 	return false;
1130 }
1131 
1132 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
1133 			const char *fmt, ...)
1134 {
1135 	va_list args;
1136 	char buf[100];
1137 
1138 	if (slab_add_kunit_errors())
1139 		return;
1140 
1141 	va_start(args, fmt);
1142 	vsnprintf(buf, sizeof(buf), fmt, args);
1143 	va_end(args);
1144 	slab_bug(s, "%s", buf);
1145 	print_slab_info(slab);
1146 	dump_stack();
1147 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1148 }
1149 
1150 static void init_object(struct kmem_cache *s, void *object, u8 val)
1151 {
1152 	u8 *p = kasan_reset_tag(object);
1153 	unsigned int poison_size = s->object_size;
1154 
1155 	if (s->flags & SLAB_RED_ZONE) {
1156 		memset(p - s->red_left_pad, val, s->red_left_pad);
1157 
1158 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1159 			/*
1160 			 * Redzone the extra allocated space by kmalloc than
1161 			 * requested, and the poison size will be limited to
1162 			 * the original request size accordingly.
1163 			 */
1164 			poison_size = get_orig_size(s, object);
1165 		}
1166 	}
1167 
1168 	if (s->flags & __OBJECT_POISON) {
1169 		memset(p, POISON_FREE, poison_size - 1);
1170 		p[poison_size - 1] = POISON_END;
1171 	}
1172 
1173 	if (s->flags & SLAB_RED_ZONE)
1174 		memset(p + poison_size, val, s->inuse - poison_size);
1175 }
1176 
1177 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
1178 						void *from, void *to)
1179 {
1180 	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1181 	memset(from, data, to - from);
1182 }
1183 
1184 static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
1185 			u8 *object, char *what,
1186 			u8 *start, unsigned int value, unsigned int bytes)
1187 {
1188 	u8 *fault;
1189 	u8 *end;
1190 	u8 *addr = slab_address(slab);
1191 
1192 	metadata_access_enable();
1193 	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
1194 	metadata_access_disable();
1195 	if (!fault)
1196 		return 1;
1197 
1198 	end = start + bytes;
1199 	while (end > fault && end[-1] == value)
1200 		end--;
1201 
1202 	if (slab_add_kunit_errors())
1203 		goto skip_bug_print;
1204 
1205 	slab_bug(s, "%s overwritten", what);
1206 	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1207 					fault, end - 1, fault - addr,
1208 					fault[0], value);
1209 
1210 skip_bug_print:
1211 	restore_bytes(s, what, value, fault, end);
1212 	return 0;
1213 }
1214 
1215 /*
1216  * Object layout:
1217  *
1218  * object address
1219  * 	Bytes of the object to be managed.
1220  * 	If the freepointer may overlay the object then the free
1221  *	pointer is at the middle of the object.
1222  *
1223  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
1224  * 	0xa5 (POISON_END)
1225  *
1226  * object + s->object_size
1227  * 	Padding to reach word boundary. This is also used for Redzoning.
1228  * 	Padding is extended by another word if Redzoning is enabled and
1229  * 	object_size == inuse.
1230  *
1231  * 	We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
1232  * 	0xcc (SLUB_RED_ACTIVE) for objects in use.
1233  *
1234  * object + s->inuse
1235  * 	Meta data starts here.
1236  *
1237  * 	A. Free pointer (if we cannot overwrite object on free)
1238  * 	B. Tracking data for SLAB_STORE_USER
1239  *	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
1240  *	D. Padding to reach required alignment boundary or at minimum
1241  * 		one word if debugging is on to be able to detect writes
1242  * 		before the word boundary.
1243  *
1244  *	Padding is done using 0x5a (POISON_INUSE)
1245  *
1246  * object + s->size
1247  * 	Nothing is used beyond s->size.
1248  *
1249  * If slabcaches are merged then the object_size and inuse boundaries are mostly
1250  * ignored. And therefore no slab options that rely on these boundaries
1251  * may be used with merged slabcaches.
1252  */
1253 
1254 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
1255 {
1256 	unsigned long off = get_info_end(s);	/* The end of info */
1257 
1258 	if (s->flags & SLAB_STORE_USER) {
1259 		/* We also have user information there */
1260 		off += 2 * sizeof(struct track);
1261 
1262 		if (s->flags & SLAB_KMALLOC)
1263 			off += sizeof(unsigned int);
1264 	}
1265 
1266 	off += kasan_metadata_size(s, false);
1267 
1268 	if (size_from_object(s) == off)
1269 		return 1;
1270 
1271 	return check_bytes_and_report(s, slab, p, "Object padding",
1272 			p + off, POISON_INUSE, size_from_object(s) - off);
1273 }
1274 
1275 /* Check the pad bytes at the end of a slab page */
1276 static void slab_pad_check(struct kmem_cache *s, struct slab *slab)
1277 {
1278 	u8 *start;
1279 	u8 *fault;
1280 	u8 *end;
1281 	u8 *pad;
1282 	int length;
1283 	int remainder;
1284 
1285 	if (!(s->flags & SLAB_POISON))
1286 		return;
1287 
1288 	start = slab_address(slab);
1289 	length = slab_size(slab);
1290 	end = start + length;
1291 	remainder = length % s->size;
1292 	if (!remainder)
1293 		return;
1294 
1295 	pad = end - remainder;
1296 	metadata_access_enable();
1297 	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1298 	metadata_access_disable();
1299 	if (!fault)
1300 		return;
1301 	while (end > fault && end[-1] == POISON_INUSE)
1302 		end--;
1303 
1304 	slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1305 			fault, end - 1, fault - start);
1306 	print_section(KERN_ERR, "Padding ", pad, remainder);
1307 
1308 	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
1309 }
1310 
1311 static int check_object(struct kmem_cache *s, struct slab *slab,
1312 					void *object, u8 val)
1313 {
1314 	u8 *p = object;
1315 	u8 *endobject = object + s->object_size;
1316 	unsigned int orig_size, kasan_meta_size;
1317 	int ret = 1;
1318 
1319 	if (s->flags & SLAB_RED_ZONE) {
1320 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1321 			object - s->red_left_pad, val, s->red_left_pad))
1322 			ret = 0;
1323 
1324 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1325 			endobject, val, s->inuse - s->object_size))
1326 			ret = 0;
1327 
1328 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1329 			orig_size = get_orig_size(s, object);
1330 
1331 			if (s->object_size > orig_size  &&
1332 				!check_bytes_and_report(s, slab, object,
1333 					"kmalloc Redzone", p + orig_size,
1334 					val, s->object_size - orig_size)) {
1335 				ret = 0;
1336 			}
1337 		}
1338 	} else {
1339 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1340 			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
1341 				endobject, POISON_INUSE,
1342 				s->inuse - s->object_size))
1343 				ret = 0;
1344 		}
1345 	}
1346 
1347 	if (s->flags & SLAB_POISON) {
1348 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1349 			/*
1350 			 * KASAN can save its free meta data inside of the
1351 			 * object at offset 0. Thus, skip checking the part of
1352 			 * the redzone that overlaps with the meta data.
1353 			 */
1354 			kasan_meta_size = kasan_metadata_size(s, true);
1355 			if (kasan_meta_size < s->object_size - 1 &&
1356 			    !check_bytes_and_report(s, slab, p, "Poison",
1357 					p + kasan_meta_size, POISON_FREE,
1358 					s->object_size - kasan_meta_size - 1))
1359 				ret = 0;
1360 			if (kasan_meta_size < s->object_size &&
1361 			    !check_bytes_and_report(s, slab, p, "End Poison",
1362 					p + s->object_size - 1, POISON_END, 1))
1363 				ret = 0;
1364 		}
1365 		/*
1366 		 * check_pad_bytes cleans up on its own.
1367 		 */
1368 		if (!check_pad_bytes(s, slab, p))
1369 			ret = 0;
1370 	}
1371 
1372 	/*
1373 	 * Cannot check freepointer while object is allocated if
1374 	 * object and freepointer overlap.
1375 	 */
1376 	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
1377 	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
1378 		object_err(s, slab, p, "Freepointer corrupt");
1379 		/*
1380 		 * No choice but to zap it and thus lose the remainder
1381 		 * of the free objects in this slab. May cause
1382 		 * another error because the object count is now wrong.
1383 		 */
1384 		set_freepointer(s, p, NULL);
1385 		ret = 0;
1386 	}
1387 
1388 	if (!ret && !slab_in_kunit_test()) {
1389 		print_trailer(s, slab, object);
1390 		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1391 	}
1392 
1393 	return ret;
1394 }
1395 
1396 static int check_slab(struct kmem_cache *s, struct slab *slab)
1397 {
1398 	int maxobj;
1399 
1400 	if (!folio_test_slab(slab_folio(slab))) {
1401 		slab_err(s, slab, "Not a valid slab page");
1402 		return 0;
1403 	}
1404 
1405 	maxobj = order_objects(slab_order(slab), s->size);
1406 	if (slab->objects > maxobj) {
1407 		slab_err(s, slab, "objects %u > max %u",
1408 			slab->objects, maxobj);
1409 		return 0;
1410 	}
1411 	if (slab->inuse > slab->objects) {
1412 		slab_err(s, slab, "inuse %u > max %u",
1413 			slab->inuse, slab->objects);
1414 		return 0;
1415 	}
1416 	/* Slab_pad_check fixes things up after itself */
1417 	slab_pad_check(s, slab);
1418 	return 1;
1419 }
1420 
1421 /*
1422  * Determine if a certain object in a slab is on the freelist. Must hold the
1423  * slab lock to guarantee that the chains are in a consistent state.
1424  */
1425 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
1426 {
1427 	int nr = 0;
1428 	void *fp;
1429 	void *object = NULL;
1430 	int max_objects;
1431 
1432 	fp = slab->freelist;
1433 	while (fp && nr <= slab->objects) {
1434 		if (fp == search)
1435 			return 1;
1436 		if (!check_valid_pointer(s, slab, fp)) {
1437 			if (object) {
1438 				object_err(s, slab, object,
1439 					"Freechain corrupt");
1440 				set_freepointer(s, object, NULL);
1441 			} else {
1442 				slab_err(s, slab, "Freepointer corrupt");
1443 				slab->freelist = NULL;
1444 				slab->inuse = slab->objects;
1445 				slab_fix(s, "Freelist cleared");
1446 				return 0;
1447 			}
1448 			break;
1449 		}
1450 		object = fp;
1451 		fp = get_freepointer(s, object);
1452 		nr++;
1453 	}
1454 
1455 	max_objects = order_objects(slab_order(slab), s->size);
1456 	if (max_objects > MAX_OBJS_PER_PAGE)
1457 		max_objects = MAX_OBJS_PER_PAGE;
1458 
1459 	if (slab->objects != max_objects) {
1460 		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1461 			 slab->objects, max_objects);
1462 		slab->objects = max_objects;
1463 		slab_fix(s, "Number of objects adjusted");
1464 	}
1465 	if (slab->inuse != slab->objects - nr) {
1466 		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
1467 			 slab->inuse, slab->objects - nr);
1468 		slab->inuse = slab->objects - nr;
1469 		slab_fix(s, "Object count adjusted");
1470 	}
1471 	return search == NULL;
1472 }
1473 
1474 static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1475 								int alloc)
1476 {
1477 	if (s->flags & SLAB_TRACE) {
1478 		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1479 			s->name,
1480 			alloc ? "alloc" : "free",
1481 			object, slab->inuse,
1482 			slab->freelist);
1483 
1484 		if (!alloc)
1485 			print_section(KERN_INFO, "Object ", (void *)object,
1486 					s->object_size);
1487 
1488 		dump_stack();
1489 	}
1490 }
1491 
1492 /*
1493  * Tracking of fully allocated slabs for debugging purposes.
1494  */
1495 static void add_full(struct kmem_cache *s,
1496 	struct kmem_cache_node *n, struct slab *slab)
1497 {
1498 	if (!(s->flags & SLAB_STORE_USER))
1499 		return;
1500 
1501 	lockdep_assert_held(&n->list_lock);
1502 	list_add(&slab->slab_list, &n->full);
1503 }
1504 
1505 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
1506 {
1507 	if (!(s->flags & SLAB_STORE_USER))
1508 		return;
1509 
1510 	lockdep_assert_held(&n->list_lock);
1511 	list_del(&slab->slab_list);
1512 }
1513 
1514 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1515 {
1516 	return atomic_long_read(&n->nr_slabs);
1517 }
1518 
1519 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1520 {
1521 	struct kmem_cache_node *n = get_node(s, node);
1522 
1523 	atomic_long_inc(&n->nr_slabs);
1524 	atomic_long_add(objects, &n->total_objects);
1525 }
1526 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1527 {
1528 	struct kmem_cache_node *n = get_node(s, node);
1529 
1530 	atomic_long_dec(&n->nr_slabs);
1531 	atomic_long_sub(objects, &n->total_objects);
1532 }
1533 
1534 /* Object debug checks for alloc/free paths */
1535 static void setup_object_debug(struct kmem_cache *s, void *object)
1536 {
1537 	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1538 		return;
1539 
1540 	init_object(s, object, SLUB_RED_INACTIVE);
1541 	init_tracking(s, object);
1542 }
1543 
1544 static
1545 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
1546 {
1547 	if (!kmem_cache_debug_flags(s, SLAB_POISON))
1548 		return;
1549 
1550 	metadata_access_enable();
1551 	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
1552 	metadata_access_disable();
1553 }
1554 
1555 static inline int alloc_consistency_checks(struct kmem_cache *s,
1556 					struct slab *slab, void *object)
1557 {
1558 	if (!check_slab(s, slab))
1559 		return 0;
1560 
1561 	if (!check_valid_pointer(s, slab, object)) {
1562 		object_err(s, slab, object, "Freelist Pointer check fails");
1563 		return 0;
1564 	}
1565 
1566 	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1567 		return 0;
1568 
1569 	return 1;
1570 }
1571 
1572 static noinline bool alloc_debug_processing(struct kmem_cache *s,
1573 			struct slab *slab, void *object, int orig_size)
1574 {
1575 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1576 		if (!alloc_consistency_checks(s, slab, object))
1577 			goto bad;
1578 	}
1579 
1580 	/* Success. Perform special debug activities for allocs */
1581 	trace(s, slab, object, 1);
1582 	set_orig_size(s, object, orig_size);
1583 	init_object(s, object, SLUB_RED_ACTIVE);
1584 	return true;
1585 
1586 bad:
1587 	if (folio_test_slab(slab_folio(slab))) {
1588 		/*
1589 		 * If this is a slab page then lets do the best we can
1590 		 * to avoid issues in the future. Marking all objects
1591 		 * as used avoids touching the remaining objects.
1592 		 */
1593 		slab_fix(s, "Marking all objects used");
1594 		slab->inuse = slab->objects;
1595 		slab->freelist = NULL;
1596 	}
1597 	return false;
1598 }
1599 
1600 static inline int free_consistency_checks(struct kmem_cache *s,
1601 		struct slab *slab, void *object, unsigned long addr)
1602 {
1603 	if (!check_valid_pointer(s, slab, object)) {
1604 		slab_err(s, slab, "Invalid object pointer 0x%p", object);
1605 		return 0;
1606 	}
1607 
1608 	if (on_freelist(s, slab, object)) {
1609 		object_err(s, slab, object, "Object already free");
1610 		return 0;
1611 	}
1612 
1613 	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1614 		return 0;
1615 
1616 	if (unlikely(s != slab->slab_cache)) {
1617 		if (!folio_test_slab(slab_folio(slab))) {
1618 			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
1619 				 object);
1620 		} else if (!slab->slab_cache) {
1621 			pr_err("SLUB <none>: no slab for object 0x%p.\n",
1622 			       object);
1623 			dump_stack();
1624 		} else
1625 			object_err(s, slab, object,
1626 					"page slab pointer corrupt.");
1627 		return 0;
1628 	}
1629 	return 1;
1630 }
1631 
1632 /*
1633  * Parse a block of slab_debug options. Blocks are delimited by ';'
1634  *
1635  * @str:    start of block
1636  * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1637  * @slabs:  return start of list of slabs, or NULL when there's no list
1638  * @init:   assume this is initial parsing and not per-kmem-create parsing
1639  *
1640  * returns the start of next block if there's any, or NULL
1641  */
1642 static char *
1643 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
1644 {
1645 	bool higher_order_disable = false;
1646 
1647 	/* Skip any completely empty blocks */
1648 	while (*str && *str == ';')
1649 		str++;
1650 
1651 	if (*str == ',') {
1652 		/*
1653 		 * No options but restriction on slabs. This means full
1654 		 * debugging for slabs matching a pattern.
1655 		 */
1656 		*flags = DEBUG_DEFAULT_FLAGS;
1657 		goto check_slabs;
1658 	}
1659 	*flags = 0;
1660 
1661 	/* Determine which debug features should be switched on */
1662 	for (; *str && *str != ',' && *str != ';'; str++) {
1663 		switch (tolower(*str)) {
1664 		case '-':
1665 			*flags = 0;
1666 			break;
1667 		case 'f':
1668 			*flags |= SLAB_CONSISTENCY_CHECKS;
1669 			break;
1670 		case 'z':
1671 			*flags |= SLAB_RED_ZONE;
1672 			break;
1673 		case 'p':
1674 			*flags |= SLAB_POISON;
1675 			break;
1676 		case 'u':
1677 			*flags |= SLAB_STORE_USER;
1678 			break;
1679 		case 't':
1680 			*flags |= SLAB_TRACE;
1681 			break;
1682 		case 'a':
1683 			*flags |= SLAB_FAILSLAB;
1684 			break;
1685 		case 'o':
1686 			/*
1687 			 * Avoid enabling debugging on caches if its minimum
1688 			 * order would increase as a result.
1689 			 */
1690 			higher_order_disable = true;
1691 			break;
1692 		default:
1693 			if (init)
1694 				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
1695 		}
1696 	}
1697 check_slabs:
1698 	if (*str == ',')
1699 		*slabs = ++str;
1700 	else
1701 		*slabs = NULL;
1702 
1703 	/* Skip over the slab list */
1704 	while (*str && *str != ';')
1705 		str++;
1706 
1707 	/* Skip any completely empty blocks */
1708 	while (*str && *str == ';')
1709 		str++;
1710 
1711 	if (init && higher_order_disable)
1712 		disable_higher_order_debug = 1;
1713 
1714 	if (*str)
1715 		return str;
1716 	else
1717 		return NULL;
1718 }
1719 
1720 static int __init setup_slub_debug(char *str)
1721 {
1722 	slab_flags_t flags;
1723 	slab_flags_t global_flags;
1724 	char *saved_str;
1725 	char *slab_list;
1726 	bool global_slub_debug_changed = false;
1727 	bool slab_list_specified = false;
1728 
1729 	global_flags = DEBUG_DEFAULT_FLAGS;
1730 	if (*str++ != '=' || !*str)
1731 		/*
1732 		 * No options specified. Switch on full debugging.
1733 		 */
1734 		goto out;
1735 
1736 	saved_str = str;
1737 	while (str) {
1738 		str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1739 
1740 		if (!slab_list) {
1741 			global_flags = flags;
1742 			global_slub_debug_changed = true;
1743 		} else {
1744 			slab_list_specified = true;
1745 			if (flags & SLAB_STORE_USER)
1746 				stack_depot_request_early_init();
1747 		}
1748 	}
1749 
1750 	/*
1751 	 * For backwards compatibility, a single list of flags with list of
1752 	 * slabs means debugging is only changed for those slabs, so the global
1753 	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1754 	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
1755 	 * long as there is no option specifying flags without a slab list.
1756 	 */
1757 	if (slab_list_specified) {
1758 		if (!global_slub_debug_changed)
1759 			global_flags = slub_debug;
1760 		slub_debug_string = saved_str;
1761 	}
1762 out:
1763 	slub_debug = global_flags;
1764 	if (slub_debug & SLAB_STORE_USER)
1765 		stack_depot_request_early_init();
1766 	if (slub_debug != 0 || slub_debug_string)
1767 		static_branch_enable(&slub_debug_enabled);
1768 	else
1769 		static_branch_disable(&slub_debug_enabled);
1770 	if ((static_branch_unlikely(&init_on_alloc) ||
1771 	     static_branch_unlikely(&init_on_free)) &&
1772 	    (slub_debug & SLAB_POISON))
1773 		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1774 	return 1;
1775 }
1776 
1777 __setup("slab_debug", setup_slub_debug);
1778 __setup_param("slub_debug", slub_debug, setup_slub_debug, 0);
1779 
1780 /*
1781  * kmem_cache_flags - apply debugging options to the cache
1782  * @flags:		flags to set
1783  * @name:		name of the cache
1784  *
1785  * Debug option(s) are applied to @flags. In addition to the debug
1786  * option(s), if a slab name (or multiple) is specified i.e.
1787  * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1788  * then only the select slabs will receive the debug option(s).
1789  */
1790 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1791 {
1792 	char *iter;
1793 	size_t len;
1794 	char *next_block;
1795 	slab_flags_t block_flags;
1796 	slab_flags_t slub_debug_local = slub_debug;
1797 
1798 	if (flags & SLAB_NO_USER_FLAGS)
1799 		return flags;
1800 
1801 	/*
1802 	 * If the slab cache is for debugging (e.g. kmemleak) then
1803 	 * don't store user (stack trace) information by default,
1804 	 * but let the user enable it via the command line below.
1805 	 */
1806 	if (flags & SLAB_NOLEAKTRACE)
1807 		slub_debug_local &= ~SLAB_STORE_USER;
1808 
1809 	len = strlen(name);
1810 	next_block = slub_debug_string;
1811 	/* Go through all blocks of debug options, see if any matches our slab's name */
1812 	while (next_block) {
1813 		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1814 		if (!iter)
1815 			continue;
1816 		/* Found a block that has a slab list, search it */
1817 		while (*iter) {
1818 			char *end, *glob;
1819 			size_t cmplen;
1820 
1821 			end = strchrnul(iter, ',');
1822 			if (next_block && next_block < end)
1823 				end = next_block - 1;
1824 
1825 			glob = strnchr(iter, end - iter, '*');
1826 			if (glob)
1827 				cmplen = glob - iter;
1828 			else
1829 				cmplen = max_t(size_t, len, (end - iter));
1830 
1831 			if (!strncmp(name, iter, cmplen)) {
1832 				flags |= block_flags;
1833 				return flags;
1834 			}
1835 
1836 			if (!*end || *end == ';')
1837 				break;
1838 			iter = end + 1;
1839 		}
1840 	}
1841 
1842 	return flags | slub_debug_local;
1843 }
1844 #else /* !CONFIG_SLUB_DEBUG */
1845 static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
1846 static inline
1847 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
1848 
1849 static inline bool alloc_debug_processing(struct kmem_cache *s,
1850 	struct slab *slab, void *object, int orig_size) { return true; }
1851 
1852 static inline bool free_debug_processing(struct kmem_cache *s,
1853 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
1854 	unsigned long addr, depot_stack_handle_t handle) { return true; }
1855 
1856 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
1857 static inline int check_object(struct kmem_cache *s, struct slab *slab,
1858 			void *object, u8 val) { return 1; }
1859 static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
1860 static inline void set_track(struct kmem_cache *s, void *object,
1861 			     enum track_item alloc, unsigned long addr) {}
1862 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1863 					struct slab *slab) {}
1864 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1865 					struct slab *slab) {}
1866 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1867 {
1868 	return flags;
1869 }
1870 #define slub_debug 0
1871 
1872 #define disable_higher_order_debug 0
1873 
1874 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1875 							{ return 0; }
1876 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1877 							int objects) {}
1878 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1879 							int objects) {}
1880 
1881 #ifndef CONFIG_SLUB_TINY
1882 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1883 			       void **freelist, void *nextfree)
1884 {
1885 	return false;
1886 }
1887 #endif
1888 #endif /* CONFIG_SLUB_DEBUG */
1889 
1890 #ifdef CONFIG_SLAB_OBJ_EXT
1891 
1892 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
1893 
1894 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
1895 {
1896 	struct slabobj_ext *slab_exts;
1897 	struct slab *obj_exts_slab;
1898 
1899 	obj_exts_slab = virt_to_slab(obj_exts);
1900 	slab_exts = slab_obj_exts(obj_exts_slab);
1901 	if (slab_exts) {
1902 		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
1903 						 obj_exts_slab, obj_exts);
1904 		/* codetag should be NULL */
1905 		WARN_ON(slab_exts[offs].ref.ct);
1906 		set_codetag_empty(&slab_exts[offs].ref);
1907 	}
1908 }
1909 
1910 static inline void mark_failed_objexts_alloc(struct slab *slab)
1911 {
1912 	slab->obj_exts = OBJEXTS_ALLOC_FAIL;
1913 }
1914 
1915 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
1916 			struct slabobj_ext *vec, unsigned int objects)
1917 {
1918 	/*
1919 	 * If vector previously failed to allocate then we have live
1920 	 * objects with no tag reference. Mark all references in this
1921 	 * vector as empty to avoid warnings later on.
1922 	 */
1923 	if (obj_exts & OBJEXTS_ALLOC_FAIL) {
1924 		unsigned int i;
1925 
1926 		for (i = 0; i < objects; i++)
1927 			set_codetag_empty(&vec[i].ref);
1928 	}
1929 }
1930 
1931 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1932 
1933 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
1934 static inline void mark_failed_objexts_alloc(struct slab *slab) {}
1935 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
1936 			struct slabobj_ext *vec, unsigned int objects) {}
1937 
1938 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1939 
1940 /*
1941  * The allocated objcg pointers array is not accounted directly.
1942  * Moreover, it should not come from DMA buffer and is not readily
1943  * reclaimable. So those GFP bits should be masked off.
1944  */
1945 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
1946 				__GFP_ACCOUNT | __GFP_NOFAIL)
1947 
1948 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
1949 		        gfp_t gfp, bool new_slab)
1950 {
1951 	unsigned int objects = objs_per_slab(s, slab);
1952 	unsigned long new_exts;
1953 	unsigned long old_exts;
1954 	struct slabobj_ext *vec;
1955 
1956 	gfp &= ~OBJCGS_CLEAR_MASK;
1957 	/* Prevent recursive extension vector allocation */
1958 	gfp |= __GFP_NO_OBJ_EXT;
1959 	vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
1960 			   slab_nid(slab));
1961 	if (!vec) {
1962 		/* Mark vectors which failed to allocate */
1963 		if (new_slab)
1964 			mark_failed_objexts_alloc(slab);
1965 
1966 		return -ENOMEM;
1967 	}
1968 
1969 	new_exts = (unsigned long)vec;
1970 #ifdef CONFIG_MEMCG
1971 	new_exts |= MEMCG_DATA_OBJEXTS;
1972 #endif
1973 	old_exts = slab->obj_exts;
1974 	handle_failed_objexts_alloc(old_exts, vec, objects);
1975 	if (new_slab) {
1976 		/*
1977 		 * If the slab is brand new and nobody can yet access its
1978 		 * obj_exts, no synchronization is required and obj_exts can
1979 		 * be simply assigned.
1980 		 */
1981 		slab->obj_exts = new_exts;
1982 	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
1983 		/*
1984 		 * If the slab is already in use, somebody can allocate and
1985 		 * assign slabobj_exts in parallel. In this case the existing
1986 		 * objcg vector should be reused.
1987 		 */
1988 		mark_objexts_empty(vec);
1989 		kfree(vec);
1990 		return 0;
1991 	}
1992 
1993 	kmemleak_not_leak(vec);
1994 	return 0;
1995 }
1996 
1997 static inline void free_slab_obj_exts(struct slab *slab)
1998 {
1999 	struct slabobj_ext *obj_exts;
2000 
2001 	obj_exts = slab_obj_exts(slab);
2002 	if (!obj_exts)
2003 		return;
2004 
2005 	/*
2006 	 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its
2007 	 * corresponding extension will be NULL. alloc_tag_sub() will throw a
2008 	 * warning if slab has extensions but the extension of an object is
2009 	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
2010 	 * the extension for obj_exts is expected to be NULL.
2011 	 */
2012 	mark_objexts_empty(obj_exts);
2013 	kfree(obj_exts);
2014 	slab->obj_exts = 0;
2015 }
2016 
2017 static inline bool need_slab_obj_ext(void)
2018 {
2019 	if (mem_alloc_profiling_enabled())
2020 		return true;
2021 
2022 	/*
2023 	 * CONFIG_MEMCG_KMEM creates vector of obj_cgroup objects conditionally
2024 	 * inside memcg_slab_post_alloc_hook. No other users for now.
2025 	 */
2026 	return false;
2027 }
2028 
2029 static inline struct slabobj_ext *
2030 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
2031 {
2032 	struct slab *slab;
2033 
2034 	if (!p)
2035 		return NULL;
2036 
2037 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2038 		return NULL;
2039 
2040 	if (flags & __GFP_NO_OBJ_EXT)
2041 		return NULL;
2042 
2043 	slab = virt_to_slab(p);
2044 	if (!slab_obj_exts(slab) &&
2045 	    WARN(alloc_slab_obj_exts(slab, s, flags, false),
2046 		 "%s, %s: Failed to create slab extension vector!\n",
2047 		 __func__, s->name))
2048 		return NULL;
2049 
2050 	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
2051 }
2052 
2053 static inline void
2054 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2055 			     int objects)
2056 {
2057 #ifdef CONFIG_MEM_ALLOC_PROFILING
2058 	struct slabobj_ext *obj_exts;
2059 	int i;
2060 
2061 	if (!mem_alloc_profiling_enabled())
2062 		return;
2063 
2064 	obj_exts = slab_obj_exts(slab);
2065 	if (!obj_exts)
2066 		return;
2067 
2068 	for (i = 0; i < objects; i++) {
2069 		unsigned int off = obj_to_index(s, slab, p[i]);
2070 
2071 		alloc_tag_sub(&obj_exts[off].ref, s->size);
2072 	}
2073 #endif
2074 }
2075 
2076 #else /* CONFIG_SLAB_OBJ_EXT */
2077 
2078 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2079 			       gfp_t gfp, bool new_slab)
2080 {
2081 	return 0;
2082 }
2083 
2084 static inline void free_slab_obj_exts(struct slab *slab)
2085 {
2086 }
2087 
2088 static inline bool need_slab_obj_ext(void)
2089 {
2090 	return false;
2091 }
2092 
2093 static inline struct slabobj_ext *
2094 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
2095 {
2096 	return NULL;
2097 }
2098 
2099 static inline void
2100 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2101 			     int objects)
2102 {
2103 }
2104 
2105 #endif /* CONFIG_SLAB_OBJ_EXT */
2106 
2107 #ifdef CONFIG_MEMCG_KMEM
2108 
2109 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
2110 
2111 static __fastpath_inline
2112 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2113 				gfp_t flags, size_t size, void **p)
2114 {
2115 	if (likely(!memcg_kmem_online()))
2116 		return true;
2117 
2118 	if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
2119 		return true;
2120 
2121 	if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
2122 		return true;
2123 
2124 	if (likely(size == 1)) {
2125 		memcg_alloc_abort_single(s, *p);
2126 		*p = NULL;
2127 	} else {
2128 		kmem_cache_free_bulk(s, size, p);
2129 	}
2130 
2131 	return false;
2132 }
2133 
2134 static __fastpath_inline
2135 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2136 			  int objects)
2137 {
2138 	struct slabobj_ext *obj_exts;
2139 
2140 	if (!memcg_kmem_online())
2141 		return;
2142 
2143 	obj_exts = slab_obj_exts(slab);
2144 	if (likely(!obj_exts))
2145 		return;
2146 
2147 	__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
2148 }
2149 #else /* CONFIG_MEMCG_KMEM */
2150 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
2151 					      struct list_lru *lru,
2152 					      gfp_t flags, size_t size,
2153 					      void **p)
2154 {
2155 	return true;
2156 }
2157 
2158 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2159 					void **p, int objects)
2160 {
2161 }
2162 #endif /* CONFIG_MEMCG_KMEM */
2163 
2164 /*
2165  * Hooks for other subsystems that check memory allocations. In a typical
2166  * production configuration these hooks all should produce no code at all.
2167  *
2168  * Returns true if freeing of the object can proceed, false if its reuse
2169  * was delayed by KASAN quarantine, or it was returned to KFENCE.
2170  */
2171 static __always_inline
2172 bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
2173 {
2174 	kmemleak_free_recursive(x, s->flags);
2175 	kmsan_slab_free(s, x);
2176 
2177 	debug_check_no_locks_freed(x, s->object_size);
2178 
2179 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
2180 		debug_check_no_obj_freed(x, s->object_size);
2181 
2182 	/* Use KCSAN to help debug racy use-after-free. */
2183 	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
2184 		__kcsan_check_access(x, s->object_size,
2185 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
2186 
2187 	if (kfence_free(x))
2188 		return false;
2189 
2190 	/*
2191 	 * As memory initialization might be integrated into KASAN,
2192 	 * kasan_slab_free and initialization memset's must be
2193 	 * kept together to avoid discrepancies in behavior.
2194 	 *
2195 	 * The initialization memset's clear the object and the metadata,
2196 	 * but don't touch the SLAB redzone.
2197 	 *
2198 	 * The object's freepointer is also avoided if stored outside the
2199 	 * object.
2200 	 */
2201 	if (unlikely(init)) {
2202 		int rsize;
2203 		unsigned int inuse;
2204 
2205 		inuse = get_info_end(s);
2206 		if (!kasan_has_integrated_init())
2207 			memset(kasan_reset_tag(x), 0, s->object_size);
2208 		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2209 		memset((char *)kasan_reset_tag(x) + inuse, 0,
2210 		       s->size - inuse - rsize);
2211 	}
2212 	/* KASAN might put x into memory quarantine, delaying its reuse. */
2213 	return !kasan_slab_free(s, x, init);
2214 }
2215 
2216 static __fastpath_inline
2217 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
2218 			     int *cnt)
2219 {
2220 
2221 	void *object;
2222 	void *next = *head;
2223 	void *old_tail = *tail;
2224 	bool init;
2225 
2226 	if (is_kfence_address(next)) {
2227 		slab_free_hook(s, next, false);
2228 		return false;
2229 	}
2230 
2231 	/* Head and tail of the reconstructed freelist */
2232 	*head = NULL;
2233 	*tail = NULL;
2234 
2235 	init = slab_want_init_on_free(s);
2236 
2237 	do {
2238 		object = next;
2239 		next = get_freepointer(s, object);
2240 
2241 		/* If object's reuse doesn't have to be delayed */
2242 		if (likely(slab_free_hook(s, object, init))) {
2243 			/* Move object to the new freelist */
2244 			set_freepointer(s, object, *head);
2245 			*head = object;
2246 			if (!*tail)
2247 				*tail = object;
2248 		} else {
2249 			/*
2250 			 * Adjust the reconstructed freelist depth
2251 			 * accordingly if object's reuse is delayed.
2252 			 */
2253 			--(*cnt);
2254 		}
2255 	} while (object != old_tail);
2256 
2257 	return *head != NULL;
2258 }
2259 
2260 static void *setup_object(struct kmem_cache *s, void *object)
2261 {
2262 	setup_object_debug(s, object);
2263 	object = kasan_init_slab_obj(s, object);
2264 	if (unlikely(s->ctor)) {
2265 		kasan_unpoison_new_object(s, object);
2266 		s->ctor(object);
2267 		kasan_poison_new_object(s, object);
2268 	}
2269 	return object;
2270 }
2271 
2272 /*
2273  * Slab allocation and freeing
2274  */
2275 static inline struct slab *alloc_slab_page(gfp_t flags, int node,
2276 		struct kmem_cache_order_objects oo)
2277 {
2278 	struct folio *folio;
2279 	struct slab *slab;
2280 	unsigned int order = oo_order(oo);
2281 
2282 	folio = (struct folio *)alloc_pages_node(node, flags, order);
2283 	if (!folio)
2284 		return NULL;
2285 
2286 	slab = folio_slab(folio);
2287 	__folio_set_slab(folio);
2288 	/* Make the flag visible before any changes to folio->mapping */
2289 	smp_wmb();
2290 	if (folio_is_pfmemalloc(folio))
2291 		slab_set_pfmemalloc(slab);
2292 
2293 	return slab;
2294 }
2295 
2296 #ifdef CONFIG_SLAB_FREELIST_RANDOM
2297 /* Pre-initialize the random sequence cache */
2298 static int init_cache_random_seq(struct kmem_cache *s)
2299 {
2300 	unsigned int count = oo_objects(s->oo);
2301 	int err;
2302 
2303 	/* Bailout if already initialised */
2304 	if (s->random_seq)
2305 		return 0;
2306 
2307 	err = cache_random_seq_create(s, count, GFP_KERNEL);
2308 	if (err) {
2309 		pr_err("SLUB: Unable to initialize free list for %s\n",
2310 			s->name);
2311 		return err;
2312 	}
2313 
2314 	/* Transform to an offset on the set of pages */
2315 	if (s->random_seq) {
2316 		unsigned int i;
2317 
2318 		for (i = 0; i < count; i++)
2319 			s->random_seq[i] *= s->size;
2320 	}
2321 	return 0;
2322 }
2323 
2324 /* Initialize each random sequence freelist per cache */
2325 static void __init init_freelist_randomization(void)
2326 {
2327 	struct kmem_cache *s;
2328 
2329 	mutex_lock(&slab_mutex);
2330 
2331 	list_for_each_entry(s, &slab_caches, list)
2332 		init_cache_random_seq(s);
2333 
2334 	mutex_unlock(&slab_mutex);
2335 }
2336 
2337 /* Get the next entry on the pre-computed freelist randomized */
2338 static void *next_freelist_entry(struct kmem_cache *s,
2339 				unsigned long *pos, void *start,
2340 				unsigned long page_limit,
2341 				unsigned long freelist_count)
2342 {
2343 	unsigned int idx;
2344 
2345 	/*
2346 	 * If the target page allocation failed, the number of objects on the
2347 	 * page might be smaller than the usual size defined by the cache.
2348 	 */
2349 	do {
2350 		idx = s->random_seq[*pos];
2351 		*pos += 1;
2352 		if (*pos >= freelist_count)
2353 			*pos = 0;
2354 	} while (unlikely(idx >= page_limit));
2355 
2356 	return (char *)start + idx;
2357 }
2358 
2359 /* Shuffle the single linked freelist based on a random pre-computed sequence */
2360 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2361 {
2362 	void *start;
2363 	void *cur;
2364 	void *next;
2365 	unsigned long idx, pos, page_limit, freelist_count;
2366 
2367 	if (slab->objects < 2 || !s->random_seq)
2368 		return false;
2369 
2370 	freelist_count = oo_objects(s->oo);
2371 	pos = get_random_u32_below(freelist_count);
2372 
2373 	page_limit = slab->objects * s->size;
2374 	start = fixup_red_left(s, slab_address(slab));
2375 
2376 	/* First entry is used as the base of the freelist */
2377 	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
2378 	cur = setup_object(s, cur);
2379 	slab->freelist = cur;
2380 
2381 	for (idx = 1; idx < slab->objects; idx++) {
2382 		next = next_freelist_entry(s, &pos, start, page_limit,
2383 			freelist_count);
2384 		next = setup_object(s, next);
2385 		set_freepointer(s, cur, next);
2386 		cur = next;
2387 	}
2388 	set_freepointer(s, cur, NULL);
2389 
2390 	return true;
2391 }
2392 #else
2393 static inline int init_cache_random_seq(struct kmem_cache *s)
2394 {
2395 	return 0;
2396 }
2397 static inline void init_freelist_randomization(void) { }
2398 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2399 {
2400 	return false;
2401 }
2402 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
2403 
2404 static __always_inline void account_slab(struct slab *slab, int order,
2405 					 struct kmem_cache *s, gfp_t gfp)
2406 {
2407 	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
2408 		alloc_slab_obj_exts(slab, s, gfp, true);
2409 
2410 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2411 			    PAGE_SIZE << order);
2412 }
2413 
2414 static __always_inline void unaccount_slab(struct slab *slab, int order,
2415 					   struct kmem_cache *s)
2416 {
2417 	if (memcg_kmem_online() || need_slab_obj_ext())
2418 		free_slab_obj_exts(slab);
2419 
2420 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2421 			    -(PAGE_SIZE << order));
2422 }
2423 
2424 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
2425 {
2426 	struct slab *slab;
2427 	struct kmem_cache_order_objects oo = s->oo;
2428 	gfp_t alloc_gfp;
2429 	void *start, *p, *next;
2430 	int idx;
2431 	bool shuffle;
2432 
2433 	flags &= gfp_allowed_mask;
2434 
2435 	flags |= s->allocflags;
2436 
2437 	/*
2438 	 * Let the initial higher-order allocation fail under memory pressure
2439 	 * so we fall-back to the minimum order allocation.
2440 	 */
2441 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
2442 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
2443 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
2444 
2445 	slab = alloc_slab_page(alloc_gfp, node, oo);
2446 	if (unlikely(!slab)) {
2447 		oo = s->min;
2448 		alloc_gfp = flags;
2449 		/*
2450 		 * Allocation may have failed due to fragmentation.
2451 		 * Try a lower order alloc if possible
2452 		 */
2453 		slab = alloc_slab_page(alloc_gfp, node, oo);
2454 		if (unlikely(!slab))
2455 			return NULL;
2456 		stat(s, ORDER_FALLBACK);
2457 	}
2458 
2459 	slab->objects = oo_objects(oo);
2460 	slab->inuse = 0;
2461 	slab->frozen = 0;
2462 
2463 	account_slab(slab, oo_order(oo), s, flags);
2464 
2465 	slab->slab_cache = s;
2466 
2467 	kasan_poison_slab(slab);
2468 
2469 	start = slab_address(slab);
2470 
2471 	setup_slab_debug(s, slab, start);
2472 
2473 	shuffle = shuffle_freelist(s, slab);
2474 
2475 	if (!shuffle) {
2476 		start = fixup_red_left(s, start);
2477 		start = setup_object(s, start);
2478 		slab->freelist = start;
2479 		for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
2480 			next = p + s->size;
2481 			next = setup_object(s, next);
2482 			set_freepointer(s, p, next);
2483 			p = next;
2484 		}
2485 		set_freepointer(s, p, NULL);
2486 	}
2487 
2488 	return slab;
2489 }
2490 
2491 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
2492 {
2493 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
2494 		flags = kmalloc_fix_flags(flags);
2495 
2496 	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2497 
2498 	return allocate_slab(s,
2499 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
2500 }
2501 
2502 static void __free_slab(struct kmem_cache *s, struct slab *slab)
2503 {
2504 	struct folio *folio = slab_folio(slab);
2505 	int order = folio_order(folio);
2506 	int pages = 1 << order;
2507 
2508 	__slab_clear_pfmemalloc(slab);
2509 	folio->mapping = NULL;
2510 	/* Make the mapping reset visible before clearing the flag */
2511 	smp_wmb();
2512 	__folio_clear_slab(folio);
2513 	mm_account_reclaimed_pages(pages);
2514 	unaccount_slab(slab, order, s);
2515 	__free_pages(&folio->page, order);
2516 }
2517 
2518 static void rcu_free_slab(struct rcu_head *h)
2519 {
2520 	struct slab *slab = container_of(h, struct slab, rcu_head);
2521 
2522 	__free_slab(slab->slab_cache, slab);
2523 }
2524 
2525 static void free_slab(struct kmem_cache *s, struct slab *slab)
2526 {
2527 	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
2528 		void *p;
2529 
2530 		slab_pad_check(s, slab);
2531 		for_each_object(p, s, slab_address(slab), slab->objects)
2532 			check_object(s, slab, p, SLUB_RED_INACTIVE);
2533 	}
2534 
2535 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
2536 		call_rcu(&slab->rcu_head, rcu_free_slab);
2537 	else
2538 		__free_slab(s, slab);
2539 }
2540 
2541 static void discard_slab(struct kmem_cache *s, struct slab *slab)
2542 {
2543 	dec_slabs_node(s, slab_nid(slab), slab->objects);
2544 	free_slab(s, slab);
2545 }
2546 
2547 /*
2548  * SLUB reuses PG_workingset bit to keep track of whether it's on
2549  * the per-node partial list.
2550  */
2551 static inline bool slab_test_node_partial(const struct slab *slab)
2552 {
2553 	return folio_test_workingset(slab_folio(slab));
2554 }
2555 
2556 static inline void slab_set_node_partial(struct slab *slab)
2557 {
2558 	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2559 }
2560 
2561 static inline void slab_clear_node_partial(struct slab *slab)
2562 {
2563 	clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2564 }
2565 
2566 /*
2567  * Management of partially allocated slabs.
2568  */
2569 static inline void
2570 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
2571 {
2572 	n->nr_partial++;
2573 	if (tail == DEACTIVATE_TO_TAIL)
2574 		list_add_tail(&slab->slab_list, &n->partial);
2575 	else
2576 		list_add(&slab->slab_list, &n->partial);
2577 	slab_set_node_partial(slab);
2578 }
2579 
2580 static inline void add_partial(struct kmem_cache_node *n,
2581 				struct slab *slab, int tail)
2582 {
2583 	lockdep_assert_held(&n->list_lock);
2584 	__add_partial(n, slab, tail);
2585 }
2586 
2587 static inline void remove_partial(struct kmem_cache_node *n,
2588 					struct slab *slab)
2589 {
2590 	lockdep_assert_held(&n->list_lock);
2591 	list_del(&slab->slab_list);
2592 	slab_clear_node_partial(slab);
2593 	n->nr_partial--;
2594 }
2595 
2596 /*
2597  * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
2598  * slab from the n->partial list. Remove only a single object from the slab, do
2599  * the alloc_debug_processing() checks and leave the slab on the list, or move
2600  * it to full list if it was the last free object.
2601  */
2602 static void *alloc_single_from_partial(struct kmem_cache *s,
2603 		struct kmem_cache_node *n, struct slab *slab, int orig_size)
2604 {
2605 	void *object;
2606 
2607 	lockdep_assert_held(&n->list_lock);
2608 
2609 	object = slab->freelist;
2610 	slab->freelist = get_freepointer(s, object);
2611 	slab->inuse++;
2612 
2613 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
2614 		remove_partial(n, slab);
2615 		return NULL;
2616 	}
2617 
2618 	if (slab->inuse == slab->objects) {
2619 		remove_partial(n, slab);
2620 		add_full(s, n, slab);
2621 	}
2622 
2623 	return object;
2624 }
2625 
2626 /*
2627  * Called only for kmem_cache_debug() caches to allocate from a freshly
2628  * allocated slab. Allocate a single object instead of whole freelist
2629  * and put the slab to the partial (or full) list.
2630  */
2631 static void *alloc_single_from_new_slab(struct kmem_cache *s,
2632 					struct slab *slab, int orig_size)
2633 {
2634 	int nid = slab_nid(slab);
2635 	struct kmem_cache_node *n = get_node(s, nid);
2636 	unsigned long flags;
2637 	void *object;
2638 
2639 
2640 	object = slab->freelist;
2641 	slab->freelist = get_freepointer(s, object);
2642 	slab->inuse = 1;
2643 
2644 	if (!alloc_debug_processing(s, slab, object, orig_size))
2645 		/*
2646 		 * It's not really expected that this would fail on a
2647 		 * freshly allocated slab, but a concurrent memory
2648 		 * corruption in theory could cause that.
2649 		 */
2650 		return NULL;
2651 
2652 	spin_lock_irqsave(&n->list_lock, flags);
2653 
2654 	if (slab->inuse == slab->objects)
2655 		add_full(s, n, slab);
2656 	else
2657 		add_partial(n, slab, DEACTIVATE_TO_HEAD);
2658 
2659 	inc_slabs_node(s, nid, slab->objects);
2660 	spin_unlock_irqrestore(&n->list_lock, flags);
2661 
2662 	return object;
2663 }
2664 
2665 #ifdef CONFIG_SLUB_CPU_PARTIAL
2666 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
2667 #else
2668 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
2669 				   int drain) { }
2670 #endif
2671 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
2672 
2673 /*
2674  * Try to allocate a partial slab from a specific node.
2675  */
2676 static struct slab *get_partial_node(struct kmem_cache *s,
2677 				     struct kmem_cache_node *n,
2678 				     struct partial_context *pc)
2679 {
2680 	struct slab *slab, *slab2, *partial = NULL;
2681 	unsigned long flags;
2682 	unsigned int partial_slabs = 0;
2683 
2684 	/*
2685 	 * Racy check. If we mistakenly see no partial slabs then we
2686 	 * just allocate an empty slab. If we mistakenly try to get a
2687 	 * partial slab and there is none available then get_partial()
2688 	 * will return NULL.
2689 	 */
2690 	if (!n || !n->nr_partial)
2691 		return NULL;
2692 
2693 	spin_lock_irqsave(&n->list_lock, flags);
2694 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
2695 		if (!pfmemalloc_match(slab, pc->flags))
2696 			continue;
2697 
2698 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
2699 			void *object = alloc_single_from_partial(s, n, slab,
2700 							pc->orig_size);
2701 			if (object) {
2702 				partial = slab;
2703 				pc->object = object;
2704 				break;
2705 			}
2706 			continue;
2707 		}
2708 
2709 		remove_partial(n, slab);
2710 
2711 		if (!partial) {
2712 			partial = slab;
2713 			stat(s, ALLOC_FROM_PARTIAL);
2714 
2715 			if ((slub_get_cpu_partial(s) == 0)) {
2716 				break;
2717 			}
2718 		} else {
2719 			put_cpu_partial(s, slab, 0);
2720 			stat(s, CPU_PARTIAL_NODE);
2721 
2722 			if (++partial_slabs > slub_get_cpu_partial(s) / 2) {
2723 				break;
2724 			}
2725 		}
2726 	}
2727 	spin_unlock_irqrestore(&n->list_lock, flags);
2728 	return partial;
2729 }
2730 
2731 /*
2732  * Get a slab from somewhere. Search in increasing NUMA distances.
2733  */
2734 static struct slab *get_any_partial(struct kmem_cache *s,
2735 				    struct partial_context *pc)
2736 {
2737 #ifdef CONFIG_NUMA
2738 	struct zonelist *zonelist;
2739 	struct zoneref *z;
2740 	struct zone *zone;
2741 	enum zone_type highest_zoneidx = gfp_zone(pc->flags);
2742 	struct slab *slab;
2743 	unsigned int cpuset_mems_cookie;
2744 
2745 	/*
2746 	 * The defrag ratio allows a configuration of the tradeoffs between
2747 	 * inter node defragmentation and node local allocations. A lower
2748 	 * defrag_ratio increases the tendency to do local allocations
2749 	 * instead of attempting to obtain partial slabs from other nodes.
2750 	 *
2751 	 * If the defrag_ratio is set to 0 then kmalloc() always
2752 	 * returns node local objects. If the ratio is higher then kmalloc()
2753 	 * may return off node objects because partial slabs are obtained
2754 	 * from other nodes and filled up.
2755 	 *
2756 	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2757 	 * (which makes defrag_ratio = 1000) then every (well almost)
2758 	 * allocation will first attempt to defrag slab caches on other nodes.
2759 	 * This means scanning over all nodes to look for partial slabs which
2760 	 * may be expensive if we do it every time we are trying to find a slab
2761 	 * with available objects.
2762 	 */
2763 	if (!s->remote_node_defrag_ratio ||
2764 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
2765 		return NULL;
2766 
2767 	do {
2768 		cpuset_mems_cookie = read_mems_allowed_begin();
2769 		zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
2770 		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
2771 			struct kmem_cache_node *n;
2772 
2773 			n = get_node(s, zone_to_nid(zone));
2774 
2775 			if (n && cpuset_zone_allowed(zone, pc->flags) &&
2776 					n->nr_partial > s->min_partial) {
2777 				slab = get_partial_node(s, n, pc);
2778 				if (slab) {
2779 					/*
2780 					 * Don't check read_mems_allowed_retry()
2781 					 * here - if mems_allowed was updated in
2782 					 * parallel, that was a harmless race
2783 					 * between allocation and the cpuset
2784 					 * update
2785 					 */
2786 					return slab;
2787 				}
2788 			}
2789 		}
2790 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
2791 #endif	/* CONFIG_NUMA */
2792 	return NULL;
2793 }
2794 
2795 /*
2796  * Get a partial slab, lock it and return it.
2797  */
2798 static struct slab *get_partial(struct kmem_cache *s, int node,
2799 				struct partial_context *pc)
2800 {
2801 	struct slab *slab;
2802 	int searchnode = node;
2803 
2804 	if (node == NUMA_NO_NODE)
2805 		searchnode = numa_mem_id();
2806 
2807 	slab = get_partial_node(s, get_node(s, searchnode), pc);
2808 	if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
2809 		return slab;
2810 
2811 	return get_any_partial(s, pc);
2812 }
2813 
2814 #ifndef CONFIG_SLUB_TINY
2815 
2816 #ifdef CONFIG_PREEMPTION
2817 /*
2818  * Calculate the next globally unique transaction for disambiguation
2819  * during cmpxchg. The transactions start with the cpu number and are then
2820  * incremented by CONFIG_NR_CPUS.
2821  */
2822 #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
2823 #else
2824 /*
2825  * No preemption supported therefore also no need to check for
2826  * different cpus.
2827  */
2828 #define TID_STEP 1
2829 #endif /* CONFIG_PREEMPTION */
2830 
2831 static inline unsigned long next_tid(unsigned long tid)
2832 {
2833 	return tid + TID_STEP;
2834 }
2835 
2836 #ifdef SLUB_DEBUG_CMPXCHG
2837 static inline unsigned int tid_to_cpu(unsigned long tid)
2838 {
2839 	return tid % TID_STEP;
2840 }
2841 
2842 static inline unsigned long tid_to_event(unsigned long tid)
2843 {
2844 	return tid / TID_STEP;
2845 }
2846 #endif
2847 
2848 static inline unsigned int init_tid(int cpu)
2849 {
2850 	return cpu;
2851 }
2852 
2853 static inline void note_cmpxchg_failure(const char *n,
2854 		const struct kmem_cache *s, unsigned long tid)
2855 {
2856 #ifdef SLUB_DEBUG_CMPXCHG
2857 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2858 
2859 	pr_info("%s %s: cmpxchg redo ", n, s->name);
2860 
2861 #ifdef CONFIG_PREEMPTION
2862 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2863 		pr_warn("due to cpu change %d -> %d\n",
2864 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
2865 	else
2866 #endif
2867 	if (tid_to_event(tid) != tid_to_event(actual_tid))
2868 		pr_warn("due to cpu running other code. Event %ld->%ld\n",
2869 			tid_to_event(tid), tid_to_event(actual_tid));
2870 	else
2871 		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2872 			actual_tid, tid, next_tid(tid));
2873 #endif
2874 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2875 }
2876 
2877 static void init_kmem_cache_cpus(struct kmem_cache *s)
2878 {
2879 	int cpu;
2880 	struct kmem_cache_cpu *c;
2881 
2882 	for_each_possible_cpu(cpu) {
2883 		c = per_cpu_ptr(s->cpu_slab, cpu);
2884 		local_lock_init(&c->lock);
2885 		c->tid = init_tid(cpu);
2886 	}
2887 }
2888 
2889 /*
2890  * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
2891  * unfreezes the slabs and puts it on the proper list.
2892  * Assumes the slab has been already safely taken away from kmem_cache_cpu
2893  * by the caller.
2894  */
2895 static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
2896 			    void *freelist)
2897 {
2898 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
2899 	int free_delta = 0;
2900 	void *nextfree, *freelist_iter, *freelist_tail;
2901 	int tail = DEACTIVATE_TO_HEAD;
2902 	unsigned long flags = 0;
2903 	struct slab new;
2904 	struct slab old;
2905 
2906 	if (READ_ONCE(slab->freelist)) {
2907 		stat(s, DEACTIVATE_REMOTE_FREES);
2908 		tail = DEACTIVATE_TO_TAIL;
2909 	}
2910 
2911 	/*
2912 	 * Stage one: Count the objects on cpu's freelist as free_delta and
2913 	 * remember the last object in freelist_tail for later splicing.
2914 	 */
2915 	freelist_tail = NULL;
2916 	freelist_iter = freelist;
2917 	while (freelist_iter) {
2918 		nextfree = get_freepointer(s, freelist_iter);
2919 
2920 		/*
2921 		 * If 'nextfree' is invalid, it is possible that the object at
2922 		 * 'freelist_iter' is already corrupted.  So isolate all objects
2923 		 * starting at 'freelist_iter' by skipping them.
2924 		 */
2925 		if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
2926 			break;
2927 
2928 		freelist_tail = freelist_iter;
2929 		free_delta++;
2930 
2931 		freelist_iter = nextfree;
2932 	}
2933 
2934 	/*
2935 	 * Stage two: Unfreeze the slab while splicing the per-cpu
2936 	 * freelist to the head of slab's freelist.
2937 	 */
2938 	do {
2939 		old.freelist = READ_ONCE(slab->freelist);
2940 		old.counters = READ_ONCE(slab->counters);
2941 		VM_BUG_ON(!old.frozen);
2942 
2943 		/* Determine target state of the slab */
2944 		new.counters = old.counters;
2945 		new.frozen = 0;
2946 		if (freelist_tail) {
2947 			new.inuse -= free_delta;
2948 			set_freepointer(s, freelist_tail, old.freelist);
2949 			new.freelist = freelist;
2950 		} else {
2951 			new.freelist = old.freelist;
2952 		}
2953 	} while (!slab_update_freelist(s, slab,
2954 		old.freelist, old.counters,
2955 		new.freelist, new.counters,
2956 		"unfreezing slab"));
2957 
2958 	/*
2959 	 * Stage three: Manipulate the slab list based on the updated state.
2960 	 */
2961 	if (!new.inuse && n->nr_partial >= s->min_partial) {
2962 		stat(s, DEACTIVATE_EMPTY);
2963 		discard_slab(s, slab);
2964 		stat(s, FREE_SLAB);
2965 	} else if (new.freelist) {
2966 		spin_lock_irqsave(&n->list_lock, flags);
2967 		add_partial(n, slab, tail);
2968 		spin_unlock_irqrestore(&n->list_lock, flags);
2969 		stat(s, tail);
2970 	} else {
2971 		stat(s, DEACTIVATE_FULL);
2972 	}
2973 }
2974 
2975 #ifdef CONFIG_SLUB_CPU_PARTIAL
2976 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
2977 {
2978 	struct kmem_cache_node *n = NULL, *n2 = NULL;
2979 	struct slab *slab, *slab_to_discard = NULL;
2980 	unsigned long flags = 0;
2981 
2982 	while (partial_slab) {
2983 		slab = partial_slab;
2984 		partial_slab = slab->next;
2985 
2986 		n2 = get_node(s, slab_nid(slab));
2987 		if (n != n2) {
2988 			if (n)
2989 				spin_unlock_irqrestore(&n->list_lock, flags);
2990 
2991 			n = n2;
2992 			spin_lock_irqsave(&n->list_lock, flags);
2993 		}
2994 
2995 		if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) {
2996 			slab->next = slab_to_discard;
2997 			slab_to_discard = slab;
2998 		} else {
2999 			add_partial(n, slab, DEACTIVATE_TO_TAIL);
3000 			stat(s, FREE_ADD_PARTIAL);
3001 		}
3002 	}
3003 
3004 	if (n)
3005 		spin_unlock_irqrestore(&n->list_lock, flags);
3006 
3007 	while (slab_to_discard) {
3008 		slab = slab_to_discard;
3009 		slab_to_discard = slab_to_discard->next;
3010 
3011 		stat(s, DEACTIVATE_EMPTY);
3012 		discard_slab(s, slab);
3013 		stat(s, FREE_SLAB);
3014 	}
3015 }
3016 
3017 /*
3018  * Put all the cpu partial slabs to the node partial list.
3019  */
3020 static void put_partials(struct kmem_cache *s)
3021 {
3022 	struct slab *partial_slab;
3023 	unsigned long flags;
3024 
3025 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3026 	partial_slab = this_cpu_read(s->cpu_slab->partial);
3027 	this_cpu_write(s->cpu_slab->partial, NULL);
3028 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3029 
3030 	if (partial_slab)
3031 		__put_partials(s, partial_slab);
3032 }
3033 
3034 static void put_partials_cpu(struct kmem_cache *s,
3035 			     struct kmem_cache_cpu *c)
3036 {
3037 	struct slab *partial_slab;
3038 
3039 	partial_slab = slub_percpu_partial(c);
3040 	c->partial = NULL;
3041 
3042 	if (partial_slab)
3043 		__put_partials(s, partial_slab);
3044 }
3045 
3046 /*
3047  * Put a slab into a partial slab slot if available.
3048  *
3049  * If we did not find a slot then simply move all the partials to the
3050  * per node partial list.
3051  */
3052 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
3053 {
3054 	struct slab *oldslab;
3055 	struct slab *slab_to_put = NULL;
3056 	unsigned long flags;
3057 	int slabs = 0;
3058 
3059 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3060 
3061 	oldslab = this_cpu_read(s->cpu_slab->partial);
3062 
3063 	if (oldslab) {
3064 		if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
3065 			/*
3066 			 * Partial array is full. Move the existing set to the
3067 			 * per node partial list. Postpone the actual unfreezing
3068 			 * outside of the critical section.
3069 			 */
3070 			slab_to_put = oldslab;
3071 			oldslab = NULL;
3072 		} else {
3073 			slabs = oldslab->slabs;
3074 		}
3075 	}
3076 
3077 	slabs++;
3078 
3079 	slab->slabs = slabs;
3080 	slab->next = oldslab;
3081 
3082 	this_cpu_write(s->cpu_slab->partial, slab);
3083 
3084 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3085 
3086 	if (slab_to_put) {
3087 		__put_partials(s, slab_to_put);
3088 		stat(s, CPU_PARTIAL_DRAIN);
3089 	}
3090 }
3091 
3092 #else	/* CONFIG_SLUB_CPU_PARTIAL */
3093 
3094 static inline void put_partials(struct kmem_cache *s) { }
3095 static inline void put_partials_cpu(struct kmem_cache *s,
3096 				    struct kmem_cache_cpu *c) { }
3097 
3098 #endif	/* CONFIG_SLUB_CPU_PARTIAL */
3099 
3100 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
3101 {
3102 	unsigned long flags;
3103 	struct slab *slab;
3104 	void *freelist;
3105 
3106 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3107 
3108 	slab = c->slab;
3109 	freelist = c->freelist;
3110 
3111 	c->slab = NULL;
3112 	c->freelist = NULL;
3113 	c->tid = next_tid(c->tid);
3114 
3115 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3116 
3117 	if (slab) {
3118 		deactivate_slab(s, slab, freelist);
3119 		stat(s, CPUSLAB_FLUSH);
3120 	}
3121 }
3122 
3123 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
3124 {
3125 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3126 	void *freelist = c->freelist;
3127 	struct slab *slab = c->slab;
3128 
3129 	c->slab = NULL;
3130 	c->freelist = NULL;
3131 	c->tid = next_tid(c->tid);
3132 
3133 	if (slab) {
3134 		deactivate_slab(s, slab, freelist);
3135 		stat(s, CPUSLAB_FLUSH);
3136 	}
3137 
3138 	put_partials_cpu(s, c);
3139 }
3140 
3141 struct slub_flush_work {
3142 	struct work_struct work;
3143 	struct kmem_cache *s;
3144 	bool skip;
3145 };
3146 
3147 /*
3148  * Flush cpu slab.
3149  *
3150  * Called from CPU work handler with migration disabled.
3151  */
3152 static void flush_cpu_slab(struct work_struct *w)
3153 {
3154 	struct kmem_cache *s;
3155 	struct kmem_cache_cpu *c;
3156 	struct slub_flush_work *sfw;
3157 
3158 	sfw = container_of(w, struct slub_flush_work, work);
3159 
3160 	s = sfw->s;
3161 	c = this_cpu_ptr(s->cpu_slab);
3162 
3163 	if (c->slab)
3164 		flush_slab(s, c);
3165 
3166 	put_partials(s);
3167 }
3168 
3169 static bool has_cpu_slab(int cpu, struct kmem_cache *s)
3170 {
3171 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3172 
3173 	return c->slab || slub_percpu_partial(c);
3174 }
3175 
3176 static DEFINE_MUTEX(flush_lock);
3177 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
3178 
3179 static void flush_all_cpus_locked(struct kmem_cache *s)
3180 {
3181 	struct slub_flush_work *sfw;
3182 	unsigned int cpu;
3183 
3184 	lockdep_assert_cpus_held();
3185 	mutex_lock(&flush_lock);
3186 
3187 	for_each_online_cpu(cpu) {
3188 		sfw = &per_cpu(slub_flush, cpu);
3189 		if (!has_cpu_slab(cpu, s)) {
3190 			sfw->skip = true;
3191 			continue;
3192 		}
3193 		INIT_WORK(&sfw->work, flush_cpu_slab);
3194 		sfw->skip = false;
3195 		sfw->s = s;
3196 		queue_work_on(cpu, flushwq, &sfw->work);
3197 	}
3198 
3199 	for_each_online_cpu(cpu) {
3200 		sfw = &per_cpu(slub_flush, cpu);
3201 		if (sfw->skip)
3202 			continue;
3203 		flush_work(&sfw->work);
3204 	}
3205 
3206 	mutex_unlock(&flush_lock);
3207 }
3208 
3209 static void flush_all(struct kmem_cache *s)
3210 {
3211 	cpus_read_lock();
3212 	flush_all_cpus_locked(s);
3213 	cpus_read_unlock();
3214 }
3215 
3216 /*
3217  * Use the cpu notifier to insure that the cpu slabs are flushed when
3218  * necessary.
3219  */
3220 static int slub_cpu_dead(unsigned int cpu)
3221 {
3222 	struct kmem_cache *s;
3223 
3224 	mutex_lock(&slab_mutex);
3225 	list_for_each_entry(s, &slab_caches, list)
3226 		__flush_cpu_slab(s, cpu);
3227 	mutex_unlock(&slab_mutex);
3228 	return 0;
3229 }
3230 
3231 #else /* CONFIG_SLUB_TINY */
3232 static inline void flush_all_cpus_locked(struct kmem_cache *s) { }
3233 static inline void flush_all(struct kmem_cache *s) { }
3234 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
3235 static inline int slub_cpu_dead(unsigned int cpu) { return 0; }
3236 #endif /* CONFIG_SLUB_TINY */
3237 
3238 /*
3239  * Check if the objects in a per cpu structure fit numa
3240  * locality expectations.
3241  */
3242 static inline int node_match(struct slab *slab, int node)
3243 {
3244 #ifdef CONFIG_NUMA
3245 	if (node != NUMA_NO_NODE && slab_nid(slab) != node)
3246 		return 0;
3247 #endif
3248 	return 1;
3249 }
3250 
3251 #ifdef CONFIG_SLUB_DEBUG
3252 static int count_free(struct slab *slab)
3253 {
3254 	return slab->objects - slab->inuse;
3255 }
3256 
3257 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
3258 {
3259 	return atomic_long_read(&n->total_objects);
3260 }
3261 
3262 /* Supports checking bulk free of a constructed freelist */
3263 static inline bool free_debug_processing(struct kmem_cache *s,
3264 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
3265 	unsigned long addr, depot_stack_handle_t handle)
3266 {
3267 	bool checks_ok = false;
3268 	void *object = head;
3269 	int cnt = 0;
3270 
3271 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3272 		if (!check_slab(s, slab))
3273 			goto out;
3274 	}
3275 
3276 	if (slab->inuse < *bulk_cnt) {
3277 		slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
3278 			 slab->inuse, *bulk_cnt);
3279 		goto out;
3280 	}
3281 
3282 next_object:
3283 
3284 	if (++cnt > *bulk_cnt)
3285 		goto out_cnt;
3286 
3287 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3288 		if (!free_consistency_checks(s, slab, object, addr))
3289 			goto out;
3290 	}
3291 
3292 	if (s->flags & SLAB_STORE_USER)
3293 		set_track_update(s, object, TRACK_FREE, addr, handle);
3294 	trace(s, slab, object, 0);
3295 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
3296 	init_object(s, object, SLUB_RED_INACTIVE);
3297 
3298 	/* Reached end of constructed freelist yet? */
3299 	if (object != tail) {
3300 		object = get_freepointer(s, object);
3301 		goto next_object;
3302 	}
3303 	checks_ok = true;
3304 
3305 out_cnt:
3306 	if (cnt != *bulk_cnt) {
3307 		slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
3308 			 *bulk_cnt, cnt);
3309 		*bulk_cnt = cnt;
3310 	}
3311 
3312 out:
3313 
3314 	if (!checks_ok)
3315 		slab_fix(s, "Object at 0x%p not freed", object);
3316 
3317 	return checks_ok;
3318 }
3319 #endif /* CONFIG_SLUB_DEBUG */
3320 
3321 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
3322 static unsigned long count_partial(struct kmem_cache_node *n,
3323 					int (*get_count)(struct slab *))
3324 {
3325 	unsigned long flags;
3326 	unsigned long x = 0;
3327 	struct slab *slab;
3328 
3329 	spin_lock_irqsave(&n->list_lock, flags);
3330 	list_for_each_entry(slab, &n->partial, slab_list)
3331 		x += get_count(slab);
3332 	spin_unlock_irqrestore(&n->list_lock, flags);
3333 	return x;
3334 }
3335 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
3336 
3337 #ifdef CONFIG_SLUB_DEBUG
3338 #define MAX_PARTIAL_TO_SCAN 10000
3339 
3340 static unsigned long count_partial_free_approx(struct kmem_cache_node *n)
3341 {
3342 	unsigned long flags;
3343 	unsigned long x = 0;
3344 	struct slab *slab;
3345 
3346 	spin_lock_irqsave(&n->list_lock, flags);
3347 	if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) {
3348 		list_for_each_entry(slab, &n->partial, slab_list)
3349 			x += slab->objects - slab->inuse;
3350 	} else {
3351 		/*
3352 		 * For a long list, approximate the total count of objects in
3353 		 * it to meet the limit on the number of slabs to scan.
3354 		 * Scan from both the list's head and tail for better accuracy.
3355 		 */
3356 		unsigned long scanned = 0;
3357 
3358 		list_for_each_entry(slab, &n->partial, slab_list) {
3359 			x += slab->objects - slab->inuse;
3360 			if (++scanned == MAX_PARTIAL_TO_SCAN / 2)
3361 				break;
3362 		}
3363 		list_for_each_entry_reverse(slab, &n->partial, slab_list) {
3364 			x += slab->objects - slab->inuse;
3365 			if (++scanned == MAX_PARTIAL_TO_SCAN)
3366 				break;
3367 		}
3368 		x = mult_frac(x, n->nr_partial, scanned);
3369 		x = min(x, node_nr_objs(n));
3370 	}
3371 	spin_unlock_irqrestore(&n->list_lock, flags);
3372 	return x;
3373 }
3374 
3375 static noinline void
3376 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
3377 {
3378 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
3379 				      DEFAULT_RATELIMIT_BURST);
3380 	int node;
3381 	struct kmem_cache_node *n;
3382 
3383 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
3384 		return;
3385 
3386 	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
3387 		nid, gfpflags, &gfpflags);
3388 	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
3389 		s->name, s->object_size, s->size, oo_order(s->oo),
3390 		oo_order(s->min));
3391 
3392 	if (oo_order(s->min) > get_order(s->object_size))
3393 		pr_warn("  %s debugging increased min order, use slab_debug=O to disable.\n",
3394 			s->name);
3395 
3396 	for_each_kmem_cache_node(s, node, n) {
3397 		unsigned long nr_slabs;
3398 		unsigned long nr_objs;
3399 		unsigned long nr_free;
3400 
3401 		nr_free  = count_partial_free_approx(n);
3402 		nr_slabs = node_nr_slabs(n);
3403 		nr_objs  = node_nr_objs(n);
3404 
3405 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
3406 			node, nr_slabs, nr_objs, nr_free);
3407 	}
3408 }
3409 #else /* CONFIG_SLUB_DEBUG */
3410 static inline void
3411 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
3412 #endif
3413 
3414 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
3415 {
3416 	if (unlikely(slab_test_pfmemalloc(slab)))
3417 		return gfp_pfmemalloc_allowed(gfpflags);
3418 
3419 	return true;
3420 }
3421 
3422 #ifndef CONFIG_SLUB_TINY
3423 static inline bool
3424 __update_cpu_freelist_fast(struct kmem_cache *s,
3425 			   void *freelist_old, void *freelist_new,
3426 			   unsigned long tid)
3427 {
3428 	freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
3429 	freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
3430 
3431 	return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
3432 					     &old.full, new.full);
3433 }
3434 
3435 /*
3436  * Check the slab->freelist and either transfer the freelist to the
3437  * per cpu freelist or deactivate the slab.
3438  *
3439  * The slab is still frozen if the return value is not NULL.
3440  *
3441  * If this function returns NULL then the slab has been unfrozen.
3442  */
3443 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
3444 {
3445 	struct slab new;
3446 	unsigned long counters;
3447 	void *freelist;
3448 
3449 	lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3450 
3451 	do {
3452 		freelist = slab->freelist;
3453 		counters = slab->counters;
3454 
3455 		new.counters = counters;
3456 
3457 		new.inuse = slab->objects;
3458 		new.frozen = freelist != NULL;
3459 
3460 	} while (!__slab_update_freelist(s, slab,
3461 		freelist, counters,
3462 		NULL, new.counters,
3463 		"get_freelist"));
3464 
3465 	return freelist;
3466 }
3467 
3468 /*
3469  * Freeze the partial slab and return the pointer to the freelist.
3470  */
3471 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
3472 {
3473 	struct slab new;
3474 	unsigned long counters;
3475 	void *freelist;
3476 
3477 	do {
3478 		freelist = slab->freelist;
3479 		counters = slab->counters;
3480 
3481 		new.counters = counters;
3482 		VM_BUG_ON(new.frozen);
3483 
3484 		new.inuse = slab->objects;
3485 		new.frozen = 1;
3486 
3487 	} while (!slab_update_freelist(s, slab,
3488 		freelist, counters,
3489 		NULL, new.counters,
3490 		"freeze_slab"));
3491 
3492 	return freelist;
3493 }
3494 
3495 /*
3496  * Slow path. The lockless freelist is empty or we need to perform
3497  * debugging duties.
3498  *
3499  * Processing is still very fast if new objects have been freed to the
3500  * regular freelist. In that case we simply take over the regular freelist
3501  * as the lockless freelist and zap the regular freelist.
3502  *
3503  * If that is not working then we fall back to the partial lists. We take the
3504  * first element of the freelist as the object to allocate now and move the
3505  * rest of the freelist to the lockless freelist.
3506  *
3507  * And if we were unable to get a new slab from the partial slab lists then
3508  * we need to allocate a new slab. This is the slowest path since it involves
3509  * a call to the page allocator and the setup of a new slab.
3510  *
3511  * Version of __slab_alloc to use when we know that preemption is
3512  * already disabled (which is the case for bulk allocation).
3513  */
3514 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3515 			  unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3516 {
3517 	void *freelist;
3518 	struct slab *slab;
3519 	unsigned long flags;
3520 	struct partial_context pc;
3521 	bool try_thisnode = true;
3522 
3523 	stat(s, ALLOC_SLOWPATH);
3524 
3525 reread_slab:
3526 
3527 	slab = READ_ONCE(c->slab);
3528 	if (!slab) {
3529 		/*
3530 		 * if the node is not online or has no normal memory, just
3531 		 * ignore the node constraint
3532 		 */
3533 		if (unlikely(node != NUMA_NO_NODE &&
3534 			     !node_isset(node, slab_nodes)))
3535 			node = NUMA_NO_NODE;
3536 		goto new_slab;
3537 	}
3538 
3539 	if (unlikely(!node_match(slab, node))) {
3540 		/*
3541 		 * same as above but node_match() being false already
3542 		 * implies node != NUMA_NO_NODE
3543 		 */
3544 		if (!node_isset(node, slab_nodes)) {
3545 			node = NUMA_NO_NODE;
3546 		} else {
3547 			stat(s, ALLOC_NODE_MISMATCH);
3548 			goto deactivate_slab;
3549 		}
3550 	}
3551 
3552 	/*
3553 	 * By rights, we should be searching for a slab page that was
3554 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
3555 	 * information when the page leaves the per-cpu allocator
3556 	 */
3557 	if (unlikely(!pfmemalloc_match(slab, gfpflags)))
3558 		goto deactivate_slab;
3559 
3560 	/* must check again c->slab in case we got preempted and it changed */
3561 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3562 	if (unlikely(slab != c->slab)) {
3563 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3564 		goto reread_slab;
3565 	}
3566 	freelist = c->freelist;
3567 	if (freelist)
3568 		goto load_freelist;
3569 
3570 	freelist = get_freelist(s, slab);
3571 
3572 	if (!freelist) {
3573 		c->slab = NULL;
3574 		c->tid = next_tid(c->tid);
3575 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3576 		stat(s, DEACTIVATE_BYPASS);
3577 		goto new_slab;
3578 	}
3579 
3580 	stat(s, ALLOC_REFILL);
3581 
3582 load_freelist:
3583 
3584 	lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3585 
3586 	/*
3587 	 * freelist is pointing to the list of objects to be used.
3588 	 * slab is pointing to the slab from which the objects are obtained.
3589 	 * That slab must be frozen for per cpu allocations to work.
3590 	 */
3591 	VM_BUG_ON(!c->slab->frozen);
3592 	c->freelist = get_freepointer(s, freelist);
3593 	c->tid = next_tid(c->tid);
3594 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3595 	return freelist;
3596 
3597 deactivate_slab:
3598 
3599 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3600 	if (slab != c->slab) {
3601 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3602 		goto reread_slab;
3603 	}
3604 	freelist = c->freelist;
3605 	c->slab = NULL;
3606 	c->freelist = NULL;
3607 	c->tid = next_tid(c->tid);
3608 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3609 	deactivate_slab(s, slab, freelist);
3610 
3611 new_slab:
3612 
3613 #ifdef CONFIG_SLUB_CPU_PARTIAL
3614 	while (slub_percpu_partial(c)) {
3615 		local_lock_irqsave(&s->cpu_slab->lock, flags);
3616 		if (unlikely(c->slab)) {
3617 			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3618 			goto reread_slab;
3619 		}
3620 		if (unlikely(!slub_percpu_partial(c))) {
3621 			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3622 			/* we were preempted and partial list got empty */
3623 			goto new_objects;
3624 		}
3625 
3626 		slab = slub_percpu_partial(c);
3627 		slub_set_percpu_partial(c, slab);
3628 
3629 		if (likely(node_match(slab, node) &&
3630 			   pfmemalloc_match(slab, gfpflags))) {
3631 			c->slab = slab;
3632 			freelist = get_freelist(s, slab);
3633 			VM_BUG_ON(!freelist);
3634 			stat(s, CPU_PARTIAL_ALLOC);
3635 			goto load_freelist;
3636 		}
3637 
3638 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3639 
3640 		slab->next = NULL;
3641 		__put_partials(s, slab);
3642 	}
3643 #endif
3644 
3645 new_objects:
3646 
3647 	pc.flags = gfpflags;
3648 	/*
3649 	 * When a preferred node is indicated but no __GFP_THISNODE
3650 	 *
3651 	 * 1) try to get a partial slab from target node only by having
3652 	 *    __GFP_THISNODE in pc.flags for get_partial()
3653 	 * 2) if 1) failed, try to allocate a new slab from target node with
3654 	 *    GPF_NOWAIT | __GFP_THISNODE opportunistically
3655 	 * 3) if 2) failed, retry with original gfpflags which will allow
3656 	 *    get_partial() try partial lists of other nodes before potentially
3657 	 *    allocating new page from other nodes
3658 	 */
3659 	if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
3660 		     && try_thisnode))
3661 		pc.flags = GFP_NOWAIT | __GFP_THISNODE;
3662 
3663 	pc.orig_size = orig_size;
3664 	slab = get_partial(s, node, &pc);
3665 	if (slab) {
3666 		if (kmem_cache_debug(s)) {
3667 			freelist = pc.object;
3668 			/*
3669 			 * For debug caches here we had to go through
3670 			 * alloc_single_from_partial() so just store the
3671 			 * tracking info and return the object.
3672 			 */
3673 			if (s->flags & SLAB_STORE_USER)
3674 				set_track(s, freelist, TRACK_ALLOC, addr);
3675 
3676 			return freelist;
3677 		}
3678 
3679 		freelist = freeze_slab(s, slab);
3680 		goto retry_load_slab;
3681 	}
3682 
3683 	slub_put_cpu_ptr(s->cpu_slab);
3684 	slab = new_slab(s, pc.flags, node);
3685 	c = slub_get_cpu_ptr(s->cpu_slab);
3686 
3687 	if (unlikely(!slab)) {
3688 		if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
3689 		    && try_thisnode) {
3690 			try_thisnode = false;
3691 			goto new_objects;
3692 		}
3693 		slab_out_of_memory(s, gfpflags, node);
3694 		return NULL;
3695 	}
3696 
3697 	stat(s, ALLOC_SLAB);
3698 
3699 	if (kmem_cache_debug(s)) {
3700 		freelist = alloc_single_from_new_slab(s, slab, orig_size);
3701 
3702 		if (unlikely(!freelist))
3703 			goto new_objects;
3704 
3705 		if (s->flags & SLAB_STORE_USER)
3706 			set_track(s, freelist, TRACK_ALLOC, addr);
3707 
3708 		return freelist;
3709 	}
3710 
3711 	/*
3712 	 * No other reference to the slab yet so we can
3713 	 * muck around with it freely without cmpxchg
3714 	 */
3715 	freelist = slab->freelist;
3716 	slab->freelist = NULL;
3717 	slab->inuse = slab->objects;
3718 	slab->frozen = 1;
3719 
3720 	inc_slabs_node(s, slab_nid(slab), slab->objects);
3721 
3722 	if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
3723 		/*
3724 		 * For !pfmemalloc_match() case we don't load freelist so that
3725 		 * we don't make further mismatched allocations easier.
3726 		 */
3727 		deactivate_slab(s, slab, get_freepointer(s, freelist));
3728 		return freelist;
3729 	}
3730 
3731 retry_load_slab:
3732 
3733 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3734 	if (unlikely(c->slab)) {
3735 		void *flush_freelist = c->freelist;
3736 		struct slab *flush_slab = c->slab;
3737 
3738 		c->slab = NULL;
3739 		c->freelist = NULL;
3740 		c->tid = next_tid(c->tid);
3741 
3742 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3743 
3744 		deactivate_slab(s, flush_slab, flush_freelist);
3745 
3746 		stat(s, CPUSLAB_FLUSH);
3747 
3748 		goto retry_load_slab;
3749 	}
3750 	c->slab = slab;
3751 
3752 	goto load_freelist;
3753 }
3754 
3755 /*
3756  * A wrapper for ___slab_alloc() for contexts where preemption is not yet
3757  * disabled. Compensates for possible cpu changes by refetching the per cpu area
3758  * pointer.
3759  */
3760 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3761 			  unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3762 {
3763 	void *p;
3764 
3765 #ifdef CONFIG_PREEMPT_COUNT
3766 	/*
3767 	 * We may have been preempted and rescheduled on a different
3768 	 * cpu before disabling preemption. Need to reload cpu area
3769 	 * pointer.
3770 	 */
3771 	c = slub_get_cpu_ptr(s->cpu_slab);
3772 #endif
3773 
3774 	p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
3775 #ifdef CONFIG_PREEMPT_COUNT
3776 	slub_put_cpu_ptr(s->cpu_slab);
3777 #endif
3778 	return p;
3779 }
3780 
3781 static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
3782 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3783 {
3784 	struct kmem_cache_cpu *c;
3785 	struct slab *slab;
3786 	unsigned long tid;
3787 	void *object;
3788 
3789 redo:
3790 	/*
3791 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
3792 	 * enabled. We may switch back and forth between cpus while
3793 	 * reading from one cpu area. That does not matter as long
3794 	 * as we end up on the original cpu again when doing the cmpxchg.
3795 	 *
3796 	 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
3797 	 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
3798 	 * the tid. If we are preempted and switched to another cpu between the
3799 	 * two reads, it's OK as the two are still associated with the same cpu
3800 	 * and cmpxchg later will validate the cpu.
3801 	 */
3802 	c = raw_cpu_ptr(s->cpu_slab);
3803 	tid = READ_ONCE(c->tid);
3804 
3805 	/*
3806 	 * Irqless object alloc/free algorithm used here depends on sequence
3807 	 * of fetching cpu_slab's data. tid should be fetched before anything
3808 	 * on c to guarantee that object and slab associated with previous tid
3809 	 * won't be used with current tid. If we fetch tid first, object and
3810 	 * slab could be one associated with next tid and our alloc/free
3811 	 * request will be failed. In this case, we will retry. So, no problem.
3812 	 */
3813 	barrier();
3814 
3815 	/*
3816 	 * The transaction ids are globally unique per cpu and per operation on
3817 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
3818 	 * occurs on the right processor and that there was no operation on the
3819 	 * linked list in between.
3820 	 */
3821 
3822 	object = c->freelist;
3823 	slab = c->slab;
3824 
3825 	if (!USE_LOCKLESS_FAST_PATH() ||
3826 	    unlikely(!object || !slab || !node_match(slab, node))) {
3827 		object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
3828 	} else {
3829 		void *next_object = get_freepointer_safe(s, object);
3830 
3831 		/*
3832 		 * The cmpxchg will only match if there was no additional
3833 		 * operation and if we are on the right processor.
3834 		 *
3835 		 * The cmpxchg does the following atomically (without lock
3836 		 * semantics!)
3837 		 * 1. Relocate first pointer to the current per cpu area.
3838 		 * 2. Verify that tid and freelist have not been changed
3839 		 * 3. If they were not changed replace tid and freelist
3840 		 *
3841 		 * Since this is without lock semantics the protection is only
3842 		 * against code executing on this cpu *not* from access by
3843 		 * other cpus.
3844 		 */
3845 		if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) {
3846 			note_cmpxchg_failure("slab_alloc", s, tid);
3847 			goto redo;
3848 		}
3849 		prefetch_freepointer(s, next_object);
3850 		stat(s, ALLOC_FASTPATH);
3851 	}
3852 
3853 	return object;
3854 }
3855 #else /* CONFIG_SLUB_TINY */
3856 static void *__slab_alloc_node(struct kmem_cache *s,
3857 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3858 {
3859 	struct partial_context pc;
3860 	struct slab *slab;
3861 	void *object;
3862 
3863 	pc.flags = gfpflags;
3864 	pc.orig_size = orig_size;
3865 	slab = get_partial(s, node, &pc);
3866 
3867 	if (slab)
3868 		return pc.object;
3869 
3870 	slab = new_slab(s, gfpflags, node);
3871 	if (unlikely(!slab)) {
3872 		slab_out_of_memory(s, gfpflags, node);
3873 		return NULL;
3874 	}
3875 
3876 	object = alloc_single_from_new_slab(s, slab, orig_size);
3877 
3878 	return object;
3879 }
3880 #endif /* CONFIG_SLUB_TINY */
3881 
3882 /*
3883  * If the object has been wiped upon free, make sure it's fully initialized by
3884  * zeroing out freelist pointer.
3885  */
3886 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
3887 						   void *obj)
3888 {
3889 	if (unlikely(slab_want_init_on_free(s)) && obj &&
3890 	    !freeptr_outside_object(s))
3891 		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
3892 			0, sizeof(void *));
3893 }
3894 
3895 noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
3896 {
3897 	if (__should_failslab(s, gfpflags))
3898 		return -ENOMEM;
3899 	return 0;
3900 }
3901 ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
3902 
3903 static __fastpath_inline
3904 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
3905 {
3906 	flags &= gfp_allowed_mask;
3907 
3908 	might_alloc(flags);
3909 
3910 	if (unlikely(should_failslab(s, flags)))
3911 		return NULL;
3912 
3913 	return s;
3914 }
3915 
3916 static __fastpath_inline
3917 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
3918 			  gfp_t flags, size_t size, void **p, bool init,
3919 			  unsigned int orig_size)
3920 {
3921 	unsigned int zero_size = s->object_size;
3922 	struct slabobj_ext *obj_exts;
3923 	bool kasan_init = init;
3924 	size_t i;
3925 	gfp_t init_flags = flags & gfp_allowed_mask;
3926 
3927 	/*
3928 	 * For kmalloc object, the allocated memory size(object_size) is likely
3929 	 * larger than the requested size(orig_size). If redzone check is
3930 	 * enabled for the extra space, don't zero it, as it will be redzoned
3931 	 * soon. The redzone operation for this extra space could be seen as a
3932 	 * replacement of current poisoning under certain debug option, and
3933 	 * won't break other sanity checks.
3934 	 */
3935 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
3936 	    (s->flags & SLAB_KMALLOC))
3937 		zero_size = orig_size;
3938 
3939 	/*
3940 	 * When slab_debug is enabled, avoid memory initialization integrated
3941 	 * into KASAN and instead zero out the memory via the memset below with
3942 	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
3943 	 * cause false-positive reports. This does not lead to a performance
3944 	 * penalty on production builds, as slab_debug is not intended to be
3945 	 * enabled there.
3946 	 */
3947 	if (__slub_debug_enabled())
3948 		kasan_init = false;
3949 
3950 	/*
3951 	 * As memory initialization might be integrated into KASAN,
3952 	 * kasan_slab_alloc and initialization memset must be
3953 	 * kept together to avoid discrepancies in behavior.
3954 	 *
3955 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
3956 	 */
3957 	for (i = 0; i < size; i++) {
3958 		p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
3959 		if (p[i] && init && (!kasan_init ||
3960 				     !kasan_has_integrated_init()))
3961 			memset(p[i], 0, zero_size);
3962 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
3963 					 s->flags, init_flags);
3964 		kmsan_slab_alloc(s, p[i], init_flags);
3965 		if (need_slab_obj_ext()) {
3966 			obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
3967 #ifdef CONFIG_MEM_ALLOC_PROFILING
3968 			/*
3969 			 * Currently obj_exts is used only for allocation profiling.
3970 			 * If other users appear then mem_alloc_profiling_enabled()
3971 			 * check should be added before alloc_tag_add().
3972 			 */
3973 			if (likely(obj_exts))
3974 				alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
3975 #endif
3976 		}
3977 	}
3978 
3979 	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
3980 }
3981 
3982 /*
3983  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
3984  * have the fastpath folded into their functions. So no function call
3985  * overhead for requests that can be satisfied on the fastpath.
3986  *
3987  * The fastpath works by first checking if the lockless freelist can be used.
3988  * If not then __slab_alloc is called for slow processing.
3989  *
3990  * Otherwise we can simply pick the next object from the lockless free list.
3991  */
3992 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
3993 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3994 {
3995 	void *object;
3996 	bool init = false;
3997 
3998 	s = slab_pre_alloc_hook(s, gfpflags);
3999 	if (unlikely(!s))
4000 		return NULL;
4001 
4002 	object = kfence_alloc(s, orig_size, gfpflags);
4003 	if (unlikely(object))
4004 		goto out;
4005 
4006 	object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
4007 
4008 	maybe_wipe_obj_freeptr(s, object);
4009 	init = slab_want_init_on_alloc(gfpflags, s);
4010 
4011 out:
4012 	/*
4013 	 * When init equals 'true', like for kzalloc() family, only
4014 	 * @orig_size bytes might be zeroed instead of s->object_size
4015 	 * In case this fails due to memcg_slab_post_alloc_hook(),
4016 	 * object is set to NULL
4017 	 */
4018 	slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size);
4019 
4020 	return object;
4021 }
4022 
4023 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
4024 {
4025 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
4026 				    s->object_size);
4027 
4028 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4029 
4030 	return ret;
4031 }
4032 EXPORT_SYMBOL(kmem_cache_alloc_noprof);
4033 
4034 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
4035 			   gfp_t gfpflags)
4036 {
4037 	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
4038 				    s->object_size);
4039 
4040 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4041 
4042 	return ret;
4043 }
4044 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
4045 
4046 /**
4047  * kmem_cache_alloc_node - Allocate an object on the specified node
4048  * @s: The cache to allocate from.
4049  * @gfpflags: See kmalloc().
4050  * @node: node number of the target node.
4051  *
4052  * Identical to kmem_cache_alloc but it will allocate memory on the given
4053  * node, which can improve the performance for cpu bound structures.
4054  *
4055  * Fallback to other node is possible if __GFP_THISNODE is not set.
4056  *
4057  * Return: pointer to the new object or %NULL in case of error
4058  */
4059 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
4060 {
4061 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
4062 
4063 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
4064 
4065 	return ret;
4066 }
4067 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
4068 
4069 /*
4070  * To avoid unnecessary overhead, we pass through large allocation requests
4071  * directly to the page allocator. We use __GFP_COMP, because we will need to
4072  * know the allocation order to free the pages properly in kfree.
4073  */
4074 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
4075 {
4076 	struct folio *folio;
4077 	void *ptr = NULL;
4078 	unsigned int order = get_order(size);
4079 
4080 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
4081 		flags = kmalloc_fix_flags(flags);
4082 
4083 	flags |= __GFP_COMP;
4084 	folio = (struct folio *)alloc_pages_node_noprof(node, flags, order);
4085 	if (folio) {
4086 		ptr = folio_address(folio);
4087 		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4088 				      PAGE_SIZE << order);
4089 	}
4090 
4091 	ptr = kasan_kmalloc_large(ptr, size, flags);
4092 	/* As ptr might get tagged, call kmemleak hook after KASAN. */
4093 	kmemleak_alloc(ptr, size, 1, flags);
4094 	kmsan_kmalloc_large(ptr, size, flags);
4095 
4096 	return ptr;
4097 }
4098 
4099 void *__kmalloc_large_noprof(size_t size, gfp_t flags)
4100 {
4101 	void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
4102 
4103 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4104 		      flags, NUMA_NO_NODE);
4105 	return ret;
4106 }
4107 EXPORT_SYMBOL(__kmalloc_large_noprof);
4108 
4109 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
4110 {
4111 	void *ret = ___kmalloc_large_node(size, flags, node);
4112 
4113 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4114 		      flags, node);
4115 	return ret;
4116 }
4117 EXPORT_SYMBOL(__kmalloc_large_node_noprof);
4118 
4119 static __always_inline
4120 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
4121 			unsigned long caller)
4122 {
4123 	struct kmem_cache *s;
4124 	void *ret;
4125 
4126 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4127 		ret = __kmalloc_large_node_noprof(size, flags, node);
4128 		trace_kmalloc(caller, ret, size,
4129 			      PAGE_SIZE << get_order(size), flags, node);
4130 		return ret;
4131 	}
4132 
4133 	if (unlikely(!size))
4134 		return ZERO_SIZE_PTR;
4135 
4136 	s = kmalloc_slab(size, b, flags, caller);
4137 
4138 	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
4139 	ret = kasan_kmalloc(s, ret, size, flags);
4140 	trace_kmalloc(caller, ret, size, s->size, flags, node);
4141 	return ret;
4142 }
4143 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
4144 {
4145 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
4146 }
4147 EXPORT_SYMBOL(__kmalloc_node_noprof);
4148 
4149 void *__kmalloc_noprof(size_t size, gfp_t flags)
4150 {
4151 	return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
4152 }
4153 EXPORT_SYMBOL(__kmalloc_noprof);
4154 
4155 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
4156 					 int node, unsigned long caller)
4157 {
4158 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
4159 
4160 }
4161 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
4162 
4163 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
4164 {
4165 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
4166 					    _RET_IP_, size);
4167 
4168 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
4169 
4170 	ret = kasan_kmalloc(s, ret, size, gfpflags);
4171 	return ret;
4172 }
4173 EXPORT_SYMBOL(__kmalloc_cache_noprof);
4174 
4175 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
4176 				  int node, size_t size)
4177 {
4178 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
4179 
4180 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
4181 
4182 	ret = kasan_kmalloc(s, ret, size, gfpflags);
4183 	return ret;
4184 }
4185 EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
4186 
4187 static noinline void free_to_partial_list(
4188 	struct kmem_cache *s, struct slab *slab,
4189 	void *head, void *tail, int bulk_cnt,
4190 	unsigned long addr)
4191 {
4192 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
4193 	struct slab *slab_free = NULL;
4194 	int cnt = bulk_cnt;
4195 	unsigned long flags;
4196 	depot_stack_handle_t handle = 0;
4197 
4198 	if (s->flags & SLAB_STORE_USER)
4199 		handle = set_track_prepare();
4200 
4201 	spin_lock_irqsave(&n->list_lock, flags);
4202 
4203 	if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
4204 		void *prior = slab->freelist;
4205 
4206 		/* Perform the actual freeing while we still hold the locks */
4207 		slab->inuse -= cnt;
4208 		set_freepointer(s, tail, prior);
4209 		slab->freelist = head;
4210 
4211 		/*
4212 		 * If the slab is empty, and node's partial list is full,
4213 		 * it should be discarded anyway no matter it's on full or
4214 		 * partial list.
4215 		 */
4216 		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
4217 			slab_free = slab;
4218 
4219 		if (!prior) {
4220 			/* was on full list */
4221 			remove_full(s, n, slab);
4222 			if (!slab_free) {
4223 				add_partial(n, slab, DEACTIVATE_TO_TAIL);
4224 				stat(s, FREE_ADD_PARTIAL);
4225 			}
4226 		} else if (slab_free) {
4227 			remove_partial(n, slab);
4228 			stat(s, FREE_REMOVE_PARTIAL);
4229 		}
4230 	}
4231 
4232 	if (slab_free) {
4233 		/*
4234 		 * Update the counters while still holding n->list_lock to
4235 		 * prevent spurious validation warnings
4236 		 */
4237 		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
4238 	}
4239 
4240 	spin_unlock_irqrestore(&n->list_lock, flags);
4241 
4242 	if (slab_free) {
4243 		stat(s, FREE_SLAB);
4244 		free_slab(s, slab_free);
4245 	}
4246 }
4247 
4248 /*
4249  * Slow path handling. This may still be called frequently since objects
4250  * have a longer lifetime than the cpu slabs in most processing loads.
4251  *
4252  * So we still attempt to reduce cache line usage. Just take the slab
4253  * lock and free the item. If there is no additional partial slab
4254  * handling required then we can return immediately.
4255  */
4256 static void __slab_free(struct kmem_cache *s, struct slab *slab,
4257 			void *head, void *tail, int cnt,
4258 			unsigned long addr)
4259 
4260 {
4261 	void *prior;
4262 	int was_frozen;
4263 	struct slab new;
4264 	unsigned long counters;
4265 	struct kmem_cache_node *n = NULL;
4266 	unsigned long flags;
4267 	bool on_node_partial;
4268 
4269 	stat(s, FREE_SLOWPATH);
4270 
4271 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4272 		free_to_partial_list(s, slab, head, tail, cnt, addr);
4273 		return;
4274 	}
4275 
4276 	do {
4277 		if (unlikely(n)) {
4278 			spin_unlock_irqrestore(&n->list_lock, flags);
4279 			n = NULL;
4280 		}
4281 		prior = slab->freelist;
4282 		counters = slab->counters;
4283 		set_freepointer(s, tail, prior);
4284 		new.counters = counters;
4285 		was_frozen = new.frozen;
4286 		new.inuse -= cnt;
4287 		if ((!new.inuse || !prior) && !was_frozen) {
4288 			/* Needs to be taken off a list */
4289 			if (!kmem_cache_has_cpu_partial(s) || prior) {
4290 
4291 				n = get_node(s, slab_nid(slab));
4292 				/*
4293 				 * Speculatively acquire the list_lock.
4294 				 * If the cmpxchg does not succeed then we may
4295 				 * drop the list_lock without any processing.
4296 				 *
4297 				 * Otherwise the list_lock will synchronize with
4298 				 * other processors updating the list of slabs.
4299 				 */
4300 				spin_lock_irqsave(&n->list_lock, flags);
4301 
4302 				on_node_partial = slab_test_node_partial(slab);
4303 			}
4304 		}
4305 
4306 	} while (!slab_update_freelist(s, slab,
4307 		prior, counters,
4308 		head, new.counters,
4309 		"__slab_free"));
4310 
4311 	if (likely(!n)) {
4312 
4313 		if (likely(was_frozen)) {
4314 			/*
4315 			 * The list lock was not taken therefore no list
4316 			 * activity can be necessary.
4317 			 */
4318 			stat(s, FREE_FROZEN);
4319 		} else if (kmem_cache_has_cpu_partial(s) && !prior) {
4320 			/*
4321 			 * If we started with a full slab then put it onto the
4322 			 * per cpu partial list.
4323 			 */
4324 			put_cpu_partial(s, slab, 1);
4325 			stat(s, CPU_PARTIAL_FREE);
4326 		}
4327 
4328 		return;
4329 	}
4330 
4331 	/*
4332 	 * This slab was partially empty but not on the per-node partial list,
4333 	 * in which case we shouldn't manipulate its list, just return.
4334 	 */
4335 	if (prior && !on_node_partial) {
4336 		spin_unlock_irqrestore(&n->list_lock, flags);
4337 		return;
4338 	}
4339 
4340 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
4341 		goto slab_empty;
4342 
4343 	/*
4344 	 * Objects left in the slab. If it was not on the partial list before
4345 	 * then add it.
4346 	 */
4347 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
4348 		add_partial(n, slab, DEACTIVATE_TO_TAIL);
4349 		stat(s, FREE_ADD_PARTIAL);
4350 	}
4351 	spin_unlock_irqrestore(&n->list_lock, flags);
4352 	return;
4353 
4354 slab_empty:
4355 	if (prior) {
4356 		/*
4357 		 * Slab on the partial list.
4358 		 */
4359 		remove_partial(n, slab);
4360 		stat(s, FREE_REMOVE_PARTIAL);
4361 	}
4362 
4363 	spin_unlock_irqrestore(&n->list_lock, flags);
4364 	stat(s, FREE_SLAB);
4365 	discard_slab(s, slab);
4366 }
4367 
4368 #ifndef CONFIG_SLUB_TINY
4369 /*
4370  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
4371  * can perform fastpath freeing without additional function calls.
4372  *
4373  * The fastpath is only possible if we are freeing to the current cpu slab
4374  * of this processor. This typically the case if we have just allocated
4375  * the item before.
4376  *
4377  * If fastpath is not possible then fall back to __slab_free where we deal
4378  * with all sorts of special processing.
4379  *
4380  * Bulk free of a freelist with several objects (all pointing to the
4381  * same slab) possible by specifying head and tail ptr, plus objects
4382  * count (cnt). Bulk free indicated by tail pointer being set.
4383  */
4384 static __always_inline void do_slab_free(struct kmem_cache *s,
4385 				struct slab *slab, void *head, void *tail,
4386 				int cnt, unsigned long addr)
4387 {
4388 	struct kmem_cache_cpu *c;
4389 	unsigned long tid;
4390 	void **freelist;
4391 
4392 redo:
4393 	/*
4394 	 * Determine the currently cpus per cpu slab.
4395 	 * The cpu may change afterward. However that does not matter since
4396 	 * data is retrieved via this pointer. If we are on the same cpu
4397 	 * during the cmpxchg then the free will succeed.
4398 	 */
4399 	c = raw_cpu_ptr(s->cpu_slab);
4400 	tid = READ_ONCE(c->tid);
4401 
4402 	/* Same with comment on barrier() in __slab_alloc_node() */
4403 	barrier();
4404 
4405 	if (unlikely(slab != c->slab)) {
4406 		__slab_free(s, slab, head, tail, cnt, addr);
4407 		return;
4408 	}
4409 
4410 	if (USE_LOCKLESS_FAST_PATH()) {
4411 		freelist = READ_ONCE(c->freelist);
4412 
4413 		set_freepointer(s, tail, freelist);
4414 
4415 		if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
4416 			note_cmpxchg_failure("slab_free", s, tid);
4417 			goto redo;
4418 		}
4419 	} else {
4420 		/* Update the free list under the local lock */
4421 		local_lock(&s->cpu_slab->lock);
4422 		c = this_cpu_ptr(s->cpu_slab);
4423 		if (unlikely(slab != c->slab)) {
4424 			local_unlock(&s->cpu_slab->lock);
4425 			goto redo;
4426 		}
4427 		tid = c->tid;
4428 		freelist = c->freelist;
4429 
4430 		set_freepointer(s, tail, freelist);
4431 		c->freelist = head;
4432 		c->tid = next_tid(tid);
4433 
4434 		local_unlock(&s->cpu_slab->lock);
4435 	}
4436 	stat_add(s, FREE_FASTPATH, cnt);
4437 }
4438 #else /* CONFIG_SLUB_TINY */
4439 static void do_slab_free(struct kmem_cache *s,
4440 				struct slab *slab, void *head, void *tail,
4441 				int cnt, unsigned long addr)
4442 {
4443 	__slab_free(s, slab, head, tail, cnt, addr);
4444 }
4445 #endif /* CONFIG_SLUB_TINY */
4446 
4447 static __fastpath_inline
4448 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
4449 	       unsigned long addr)
4450 {
4451 	memcg_slab_free_hook(s, slab, &object, 1);
4452 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
4453 
4454 	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
4455 		do_slab_free(s, slab, object, object, 1, addr);
4456 }
4457 
4458 #ifdef CONFIG_MEMCG_KMEM
4459 /* Do not inline the rare memcg charging failed path into the allocation path */
4460 static noinline
4461 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
4462 {
4463 	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
4464 		do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
4465 }
4466 #endif
4467 
4468 static __fastpath_inline
4469 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
4470 		    void *tail, void **p, int cnt, unsigned long addr)
4471 {
4472 	memcg_slab_free_hook(s, slab, p, cnt);
4473 	alloc_tagging_slab_free_hook(s, slab, p, cnt);
4474 	/*
4475 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
4476 	 * to remove objects, whose reuse must be delayed.
4477 	 */
4478 	if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt)))
4479 		do_slab_free(s, slab, head, tail, cnt, addr);
4480 }
4481 
4482 #ifdef CONFIG_KASAN_GENERIC
4483 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
4484 {
4485 	do_slab_free(cache, virt_to_slab(x), x, x, 1, addr);
4486 }
4487 #endif
4488 
4489 static inline struct kmem_cache *virt_to_cache(const void *obj)
4490 {
4491 	struct slab *slab;
4492 
4493 	slab = virt_to_slab(obj);
4494 	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
4495 		return NULL;
4496 	return slab->slab_cache;
4497 }
4498 
4499 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
4500 {
4501 	struct kmem_cache *cachep;
4502 
4503 	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
4504 	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
4505 		return s;
4506 
4507 	cachep = virt_to_cache(x);
4508 	if (WARN(cachep && cachep != s,
4509 		 "%s: Wrong slab cache. %s but object is from %s\n",
4510 		 __func__, s->name, cachep->name))
4511 		print_tracking(cachep, x);
4512 	return cachep;
4513 }
4514 
4515 /**
4516  * kmem_cache_free - Deallocate an object
4517  * @s: The cache the allocation was from.
4518  * @x: The previously allocated object.
4519  *
4520  * Free an object which was previously allocated from this
4521  * cache.
4522  */
4523 void kmem_cache_free(struct kmem_cache *s, void *x)
4524 {
4525 	s = cache_from_obj(s, x);
4526 	if (!s)
4527 		return;
4528 	trace_kmem_cache_free(_RET_IP_, x, s);
4529 	slab_free(s, virt_to_slab(x), x, _RET_IP_);
4530 }
4531 EXPORT_SYMBOL(kmem_cache_free);
4532 
4533 static void free_large_kmalloc(struct folio *folio, void *object)
4534 {
4535 	unsigned int order = folio_order(folio);
4536 
4537 	if (WARN_ON_ONCE(order == 0))
4538 		pr_warn_once("object pointer: 0x%p\n", object);
4539 
4540 	kmemleak_free(object);
4541 	kasan_kfree_large(object);
4542 	kmsan_kfree_large(object);
4543 
4544 	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4545 			      -(PAGE_SIZE << order));
4546 	folio_put(folio);
4547 }
4548 
4549 /**
4550  * kfree - free previously allocated memory
4551  * @object: pointer returned by kmalloc() or kmem_cache_alloc()
4552  *
4553  * If @object is NULL, no operation is performed.
4554  */
4555 void kfree(const void *object)
4556 {
4557 	struct folio *folio;
4558 	struct slab *slab;
4559 	struct kmem_cache *s;
4560 	void *x = (void *)object;
4561 
4562 	trace_kfree(_RET_IP_, object);
4563 
4564 	if (unlikely(ZERO_OR_NULL_PTR(object)))
4565 		return;
4566 
4567 	folio = virt_to_folio(object);
4568 	if (unlikely(!folio_test_slab(folio))) {
4569 		free_large_kmalloc(folio, (void *)object);
4570 		return;
4571 	}
4572 
4573 	slab = folio_slab(folio);
4574 	s = slab->slab_cache;
4575 	slab_free(s, slab, x, _RET_IP_);
4576 }
4577 EXPORT_SYMBOL(kfree);
4578 
4579 struct detached_freelist {
4580 	struct slab *slab;
4581 	void *tail;
4582 	void *freelist;
4583 	int cnt;
4584 	struct kmem_cache *s;
4585 };
4586 
4587 /*
4588  * This function progressively scans the array with free objects (with
4589  * a limited look ahead) and extract objects belonging to the same
4590  * slab.  It builds a detached freelist directly within the given
4591  * slab/objects.  This can happen without any need for
4592  * synchronization, because the objects are owned by running process.
4593  * The freelist is build up as a single linked list in the objects.
4594  * The idea is, that this detached freelist can then be bulk
4595  * transferred to the real freelist(s), but only requiring a single
4596  * synchronization primitive.  Look ahead in the array is limited due
4597  * to performance reasons.
4598  */
4599 static inline
4600 int build_detached_freelist(struct kmem_cache *s, size_t size,
4601 			    void **p, struct detached_freelist *df)
4602 {
4603 	int lookahead = 3;
4604 	void *object;
4605 	struct folio *folio;
4606 	size_t same;
4607 
4608 	object = p[--size];
4609 	folio = virt_to_folio(object);
4610 	if (!s) {
4611 		/* Handle kalloc'ed objects */
4612 		if (unlikely(!folio_test_slab(folio))) {
4613 			free_large_kmalloc(folio, object);
4614 			df->slab = NULL;
4615 			return size;
4616 		}
4617 		/* Derive kmem_cache from object */
4618 		df->slab = folio_slab(folio);
4619 		df->s = df->slab->slab_cache;
4620 	} else {
4621 		df->slab = folio_slab(folio);
4622 		df->s = cache_from_obj(s, object); /* Support for memcg */
4623 	}
4624 
4625 	/* Start new detached freelist */
4626 	df->tail = object;
4627 	df->freelist = object;
4628 	df->cnt = 1;
4629 
4630 	if (is_kfence_address(object))
4631 		return size;
4632 
4633 	set_freepointer(df->s, object, NULL);
4634 
4635 	same = size;
4636 	while (size) {
4637 		object = p[--size];
4638 		/* df->slab is always set at this point */
4639 		if (df->slab == virt_to_slab(object)) {
4640 			/* Opportunity build freelist */
4641 			set_freepointer(df->s, object, df->freelist);
4642 			df->freelist = object;
4643 			df->cnt++;
4644 			same--;
4645 			if (size != same)
4646 				swap(p[size], p[same]);
4647 			continue;
4648 		}
4649 
4650 		/* Limit look ahead search */
4651 		if (!--lookahead)
4652 			break;
4653 	}
4654 
4655 	return same;
4656 }
4657 
4658 /*
4659  * Internal bulk free of objects that were not initialised by the post alloc
4660  * hooks and thus should not be processed by the free hooks
4661  */
4662 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
4663 {
4664 	if (!size)
4665 		return;
4666 
4667 	do {
4668 		struct detached_freelist df;
4669 
4670 		size = build_detached_freelist(s, size, p, &df);
4671 		if (!df.slab)
4672 			continue;
4673 
4674 		do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
4675 			     _RET_IP_);
4676 	} while (likely(size));
4677 }
4678 
4679 /* Note that interrupts must be enabled when calling this function. */
4680 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
4681 {
4682 	if (!size)
4683 		return;
4684 
4685 	do {
4686 		struct detached_freelist df;
4687 
4688 		size = build_detached_freelist(s, size, p, &df);
4689 		if (!df.slab)
4690 			continue;
4691 
4692 		slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
4693 			       df.cnt, _RET_IP_);
4694 	} while (likely(size));
4695 }
4696 EXPORT_SYMBOL(kmem_cache_free_bulk);
4697 
4698 #ifndef CONFIG_SLUB_TINY
4699 static inline
4700 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
4701 			    void **p)
4702 {
4703 	struct kmem_cache_cpu *c;
4704 	unsigned long irqflags;
4705 	int i;
4706 
4707 	/*
4708 	 * Drain objects in the per cpu slab, while disabling local
4709 	 * IRQs, which protects against PREEMPT and interrupts
4710 	 * handlers invoking normal fastpath.
4711 	 */
4712 	c = slub_get_cpu_ptr(s->cpu_slab);
4713 	local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4714 
4715 	for (i = 0; i < size; i++) {
4716 		void *object = kfence_alloc(s, s->object_size, flags);
4717 
4718 		if (unlikely(object)) {
4719 			p[i] = object;
4720 			continue;
4721 		}
4722 
4723 		object = c->freelist;
4724 		if (unlikely(!object)) {
4725 			/*
4726 			 * We may have removed an object from c->freelist using
4727 			 * the fastpath in the previous iteration; in that case,
4728 			 * c->tid has not been bumped yet.
4729 			 * Since ___slab_alloc() may reenable interrupts while
4730 			 * allocating memory, we should bump c->tid now.
4731 			 */
4732 			c->tid = next_tid(c->tid);
4733 
4734 			local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4735 
4736 			/*
4737 			 * Invoking slow path likely have side-effect
4738 			 * of re-populating per CPU c->freelist
4739 			 */
4740 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
4741 					    _RET_IP_, c, s->object_size);
4742 			if (unlikely(!p[i]))
4743 				goto error;
4744 
4745 			c = this_cpu_ptr(s->cpu_slab);
4746 			maybe_wipe_obj_freeptr(s, p[i]);
4747 
4748 			local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4749 
4750 			continue; /* goto for-loop */
4751 		}
4752 		c->freelist = get_freepointer(s, object);
4753 		p[i] = object;
4754 		maybe_wipe_obj_freeptr(s, p[i]);
4755 		stat(s, ALLOC_FASTPATH);
4756 	}
4757 	c->tid = next_tid(c->tid);
4758 	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4759 	slub_put_cpu_ptr(s->cpu_slab);
4760 
4761 	return i;
4762 
4763 error:
4764 	slub_put_cpu_ptr(s->cpu_slab);
4765 	__kmem_cache_free_bulk(s, i, p);
4766 	return 0;
4767 
4768 }
4769 #else /* CONFIG_SLUB_TINY */
4770 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
4771 				   size_t size, void **p)
4772 {
4773 	int i;
4774 
4775 	for (i = 0; i < size; i++) {
4776 		void *object = kfence_alloc(s, s->object_size, flags);
4777 
4778 		if (unlikely(object)) {
4779 			p[i] = object;
4780 			continue;
4781 		}
4782 
4783 		p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
4784 					 _RET_IP_, s->object_size);
4785 		if (unlikely(!p[i]))
4786 			goto error;
4787 
4788 		maybe_wipe_obj_freeptr(s, p[i]);
4789 	}
4790 
4791 	return i;
4792 
4793 error:
4794 	__kmem_cache_free_bulk(s, i, p);
4795 	return 0;
4796 }
4797 #endif /* CONFIG_SLUB_TINY */
4798 
4799 /* Note that interrupts must be enabled when calling this function. */
4800 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
4801 				 void **p)
4802 {
4803 	int i;
4804 
4805 	if (!size)
4806 		return 0;
4807 
4808 	s = slab_pre_alloc_hook(s, flags);
4809 	if (unlikely(!s))
4810 		return 0;
4811 
4812 	i = __kmem_cache_alloc_bulk(s, flags, size, p);
4813 	if (unlikely(i == 0))
4814 		return 0;
4815 
4816 	/*
4817 	 * memcg and kmem_cache debug support and memory initialization.
4818 	 * Done outside of the IRQ disabled fastpath loop.
4819 	 */
4820 	if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
4821 		    slab_want_init_on_alloc(flags, s), s->object_size))) {
4822 		return 0;
4823 	}
4824 	return i;
4825 }
4826 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
4827 
4828 
4829 /*
4830  * Object placement in a slab is made very easy because we always start at
4831  * offset 0. If we tune the size of the object to the alignment then we can
4832  * get the required alignment by putting one properly sized object after
4833  * another.
4834  *
4835  * Notice that the allocation order determines the sizes of the per cpu
4836  * caches. Each processor has always one slab available for allocations.
4837  * Increasing the allocation order reduces the number of times that slabs
4838  * must be moved on and off the partial lists and is therefore a factor in
4839  * locking overhead.
4840  */
4841 
4842 /*
4843  * Minimum / Maximum order of slab pages. This influences locking overhead
4844  * and slab fragmentation. A higher order reduces the number of partial slabs
4845  * and increases the number of allocations possible without having to
4846  * take the list_lock.
4847  */
4848 static unsigned int slub_min_order;
4849 static unsigned int slub_max_order =
4850 	IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
4851 static unsigned int slub_min_objects;
4852 
4853 /*
4854  * Calculate the order of allocation given an slab object size.
4855  *
4856  * The order of allocation has significant impact on performance and other
4857  * system components. Generally order 0 allocations should be preferred since
4858  * order 0 does not cause fragmentation in the page allocator. Larger objects
4859  * be problematic to put into order 0 slabs because there may be too much
4860  * unused space left. We go to a higher order if more than 1/16th of the slab
4861  * would be wasted.
4862  *
4863  * In order to reach satisfactory performance we must ensure that a minimum
4864  * number of objects is in one slab. Otherwise we may generate too much
4865  * activity on the partial lists which requires taking the list_lock. This is
4866  * less a concern for large slabs though which are rarely used.
4867  *
4868  * slab_max_order specifies the order where we begin to stop considering the
4869  * number of objects in a slab as critical. If we reach slab_max_order then
4870  * we try to keep the page order as low as possible. So we accept more waste
4871  * of space in favor of a small page order.
4872  *
4873  * Higher order allocations also allow the placement of more objects in a
4874  * slab and thereby reduce object handling overhead. If the user has
4875  * requested a higher minimum order then we start with that one instead of
4876  * the smallest order which will fit the object.
4877  */
4878 static inline unsigned int calc_slab_order(unsigned int size,
4879 		unsigned int min_order, unsigned int max_order,
4880 		unsigned int fract_leftover)
4881 {
4882 	unsigned int order;
4883 
4884 	for (order = min_order; order <= max_order; order++) {
4885 
4886 		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
4887 		unsigned int rem;
4888 
4889 		rem = slab_size % size;
4890 
4891 		if (rem <= slab_size / fract_leftover)
4892 			break;
4893 	}
4894 
4895 	return order;
4896 }
4897 
4898 static inline int calculate_order(unsigned int size)
4899 {
4900 	unsigned int order;
4901 	unsigned int min_objects;
4902 	unsigned int max_objects;
4903 	unsigned int min_order;
4904 
4905 	min_objects = slub_min_objects;
4906 	if (!min_objects) {
4907 		/*
4908 		 * Some architectures will only update present cpus when
4909 		 * onlining them, so don't trust the number if it's just 1. But
4910 		 * we also don't want to use nr_cpu_ids always, as on some other
4911 		 * architectures, there can be many possible cpus, but never
4912 		 * onlined. Here we compromise between trying to avoid too high
4913 		 * order on systems that appear larger than they are, and too
4914 		 * low order on systems that appear smaller than they are.
4915 		 */
4916 		unsigned int nr_cpus = num_present_cpus();
4917 		if (nr_cpus <= 1)
4918 			nr_cpus = nr_cpu_ids;
4919 		min_objects = 4 * (fls(nr_cpus) + 1);
4920 	}
4921 	/* min_objects can't be 0 because get_order(0) is undefined */
4922 	max_objects = max(order_objects(slub_max_order, size), 1U);
4923 	min_objects = min(min_objects, max_objects);
4924 
4925 	min_order = max_t(unsigned int, slub_min_order,
4926 			  get_order(min_objects * size));
4927 	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
4928 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
4929 
4930 	/*
4931 	 * Attempt to find best configuration for a slab. This works by first
4932 	 * attempting to generate a layout with the best possible configuration
4933 	 * and backing off gradually.
4934 	 *
4935 	 * We start with accepting at most 1/16 waste and try to find the
4936 	 * smallest order from min_objects-derived/slab_min_order up to
4937 	 * slab_max_order that will satisfy the constraint. Note that increasing
4938 	 * the order can only result in same or less fractional waste, not more.
4939 	 *
4940 	 * If that fails, we increase the acceptable fraction of waste and try
4941 	 * again. The last iteration with fraction of 1/2 would effectively
4942 	 * accept any waste and give us the order determined by min_objects, as
4943 	 * long as at least single object fits within slab_max_order.
4944 	 */
4945 	for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
4946 		order = calc_slab_order(size, min_order, slub_max_order,
4947 					fraction);
4948 		if (order <= slub_max_order)
4949 			return order;
4950 	}
4951 
4952 	/*
4953 	 * Doh this slab cannot be placed using slab_max_order.
4954 	 */
4955 	order = get_order(size);
4956 	if (order <= MAX_PAGE_ORDER)
4957 		return order;
4958 	return -ENOSYS;
4959 }
4960 
4961 static void
4962 init_kmem_cache_node(struct kmem_cache_node *n)
4963 {
4964 	n->nr_partial = 0;
4965 	spin_lock_init(&n->list_lock);
4966 	INIT_LIST_HEAD(&n->partial);
4967 #ifdef CONFIG_SLUB_DEBUG
4968 	atomic_long_set(&n->nr_slabs, 0);
4969 	atomic_long_set(&n->total_objects, 0);
4970 	INIT_LIST_HEAD(&n->full);
4971 #endif
4972 }
4973 
4974 #ifndef CONFIG_SLUB_TINY
4975 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4976 {
4977 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
4978 			NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
4979 			sizeof(struct kmem_cache_cpu));
4980 
4981 	/*
4982 	 * Must align to double word boundary for the double cmpxchg
4983 	 * instructions to work; see __pcpu_double_call_return_bool().
4984 	 */
4985 	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
4986 				     2 * sizeof(void *));
4987 
4988 	if (!s->cpu_slab)
4989 		return 0;
4990 
4991 	init_kmem_cache_cpus(s);
4992 
4993 	return 1;
4994 }
4995 #else
4996 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4997 {
4998 	return 1;
4999 }
5000 #endif /* CONFIG_SLUB_TINY */
5001 
5002 static struct kmem_cache *kmem_cache_node;
5003 
5004 /*
5005  * No kmalloc_node yet so do it by hand. We know that this is the first
5006  * slab on the node for this slabcache. There are no concurrent accesses
5007  * possible.
5008  *
5009  * Note that this function only works on the kmem_cache_node
5010  * when allocating for the kmem_cache_node. This is used for bootstrapping
5011  * memory on a fresh node that has no slab structures yet.
5012  */
5013 static void early_kmem_cache_node_alloc(int node)
5014 {
5015 	struct slab *slab;
5016 	struct kmem_cache_node *n;
5017 
5018 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
5019 
5020 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
5021 
5022 	BUG_ON(!slab);
5023 	if (slab_nid(slab) != node) {
5024 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
5025 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
5026 	}
5027 
5028 	n = slab->freelist;
5029 	BUG_ON(!n);
5030 #ifdef CONFIG_SLUB_DEBUG
5031 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
5032 #endif
5033 	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
5034 	slab->freelist = get_freepointer(kmem_cache_node, n);
5035 	slab->inuse = 1;
5036 	kmem_cache_node->node[node] = n;
5037 	init_kmem_cache_node(n);
5038 	inc_slabs_node(kmem_cache_node, node, slab->objects);
5039 
5040 	/*
5041 	 * No locks need to be taken here as it has just been
5042 	 * initialized and there is no concurrent access.
5043 	 */
5044 	__add_partial(n, slab, DEACTIVATE_TO_HEAD);
5045 }
5046 
5047 static void free_kmem_cache_nodes(struct kmem_cache *s)
5048 {
5049 	int node;
5050 	struct kmem_cache_node *n;
5051 
5052 	for_each_kmem_cache_node(s, node, n) {
5053 		s->node[node] = NULL;
5054 		kmem_cache_free(kmem_cache_node, n);
5055 	}
5056 }
5057 
5058 void __kmem_cache_release(struct kmem_cache *s)
5059 {
5060 	cache_random_seq_destroy(s);
5061 #ifndef CONFIG_SLUB_TINY
5062 	free_percpu(s->cpu_slab);
5063 #endif
5064 	free_kmem_cache_nodes(s);
5065 }
5066 
5067 static int init_kmem_cache_nodes(struct kmem_cache *s)
5068 {
5069 	int node;
5070 
5071 	for_each_node_mask(node, slab_nodes) {
5072 		struct kmem_cache_node *n;
5073 
5074 		if (slab_state == DOWN) {
5075 			early_kmem_cache_node_alloc(node);
5076 			continue;
5077 		}
5078 		n = kmem_cache_alloc_node(kmem_cache_node,
5079 						GFP_KERNEL, node);
5080 
5081 		if (!n) {
5082 			free_kmem_cache_nodes(s);
5083 			return 0;
5084 		}
5085 
5086 		init_kmem_cache_node(n);
5087 		s->node[node] = n;
5088 	}
5089 	return 1;
5090 }
5091 
5092 static void set_cpu_partial(struct kmem_cache *s)
5093 {
5094 #ifdef CONFIG_SLUB_CPU_PARTIAL
5095 	unsigned int nr_objects;
5096 
5097 	/*
5098 	 * cpu_partial determined the maximum number of objects kept in the
5099 	 * per cpu partial lists of a processor.
5100 	 *
5101 	 * Per cpu partial lists mainly contain slabs that just have one
5102 	 * object freed. If they are used for allocation then they can be
5103 	 * filled up again with minimal effort. The slab will never hit the
5104 	 * per node partial lists and therefore no locking will be required.
5105 	 *
5106 	 * For backwards compatibility reasons, this is determined as number
5107 	 * of objects, even though we now limit maximum number of pages, see
5108 	 * slub_set_cpu_partial()
5109 	 */
5110 	if (!kmem_cache_has_cpu_partial(s))
5111 		nr_objects = 0;
5112 	else if (s->size >= PAGE_SIZE)
5113 		nr_objects = 6;
5114 	else if (s->size >= 1024)
5115 		nr_objects = 24;
5116 	else if (s->size >= 256)
5117 		nr_objects = 52;
5118 	else
5119 		nr_objects = 120;
5120 
5121 	slub_set_cpu_partial(s, nr_objects);
5122 #endif
5123 }
5124 
5125 /*
5126  * calculate_sizes() determines the order and the distribution of data within
5127  * a slab object.
5128  */
5129 static int calculate_sizes(struct kmem_cache *s)
5130 {
5131 	slab_flags_t flags = s->flags;
5132 	unsigned int size = s->object_size;
5133 	unsigned int order;
5134 
5135 	/*
5136 	 * Round up object size to the next word boundary. We can only
5137 	 * place the free pointer at word boundaries and this determines
5138 	 * the possible location of the free pointer.
5139 	 */
5140 	size = ALIGN(size, sizeof(void *));
5141 
5142 #ifdef CONFIG_SLUB_DEBUG
5143 	/*
5144 	 * Determine if we can poison the object itself. If the user of
5145 	 * the slab may touch the object after free or before allocation
5146 	 * then we should never poison the object itself.
5147 	 */
5148 	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
5149 			!s->ctor)
5150 		s->flags |= __OBJECT_POISON;
5151 	else
5152 		s->flags &= ~__OBJECT_POISON;
5153 
5154 
5155 	/*
5156 	 * If we are Redzoning then check if there is some space between the
5157 	 * end of the object and the free pointer. If not then add an
5158 	 * additional word to have some bytes to store Redzone information.
5159 	 */
5160 	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
5161 		size += sizeof(void *);
5162 #endif
5163 
5164 	/*
5165 	 * With that we have determined the number of bytes in actual use
5166 	 * by the object and redzoning.
5167 	 */
5168 	s->inuse = size;
5169 
5170 	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
5171 	    ((flags & SLAB_RED_ZONE) &&
5172 	     (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
5173 		/*
5174 		 * Relocate free pointer after the object if it is not
5175 		 * permitted to overwrite the first word of the object on
5176 		 * kmem_cache_free.
5177 		 *
5178 		 * This is the case if we do RCU, have a constructor or
5179 		 * destructor, are poisoning the objects, or are
5180 		 * redzoning an object smaller than sizeof(void *) or are
5181 		 * redzoning an object with slub_debug_orig_size() enabled,
5182 		 * in which case the right redzone may be extended.
5183 		 *
5184 		 * The assumption that s->offset >= s->inuse means free
5185 		 * pointer is outside of the object is used in the
5186 		 * freeptr_outside_object() function. If that is no
5187 		 * longer true, the function needs to be modified.
5188 		 */
5189 		s->offset = size;
5190 		size += sizeof(void *);
5191 	} else {
5192 		/*
5193 		 * Store freelist pointer near middle of object to keep
5194 		 * it away from the edges of the object to avoid small
5195 		 * sized over/underflows from neighboring allocations.
5196 		 */
5197 		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
5198 	}
5199 
5200 #ifdef CONFIG_SLUB_DEBUG
5201 	if (flags & SLAB_STORE_USER) {
5202 		/*
5203 		 * Need to store information about allocs and frees after
5204 		 * the object.
5205 		 */
5206 		size += 2 * sizeof(struct track);
5207 
5208 		/* Save the original kmalloc request size */
5209 		if (flags & SLAB_KMALLOC)
5210 			size += sizeof(unsigned int);
5211 	}
5212 #endif
5213 
5214 	kasan_cache_create(s, &size, &s->flags);
5215 #ifdef CONFIG_SLUB_DEBUG
5216 	if (flags & SLAB_RED_ZONE) {
5217 		/*
5218 		 * Add some empty padding so that we can catch
5219 		 * overwrites from earlier objects rather than let
5220 		 * tracking information or the free pointer be
5221 		 * corrupted if a user writes before the start
5222 		 * of the object.
5223 		 */
5224 		size += sizeof(void *);
5225 
5226 		s->red_left_pad = sizeof(void *);
5227 		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
5228 		size += s->red_left_pad;
5229 	}
5230 #endif
5231 
5232 	/*
5233 	 * SLUB stores one object immediately after another beginning from
5234 	 * offset 0. In order to align the objects we have to simply size
5235 	 * each object to conform to the alignment.
5236 	 */
5237 	size = ALIGN(size, s->align);
5238 	s->size = size;
5239 	s->reciprocal_size = reciprocal_value(size);
5240 	order = calculate_order(size);
5241 
5242 	if ((int)order < 0)
5243 		return 0;
5244 
5245 	s->allocflags = __GFP_COMP;
5246 
5247 	if (s->flags & SLAB_CACHE_DMA)
5248 		s->allocflags |= GFP_DMA;
5249 
5250 	if (s->flags & SLAB_CACHE_DMA32)
5251 		s->allocflags |= GFP_DMA32;
5252 
5253 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5254 		s->allocflags |= __GFP_RECLAIMABLE;
5255 
5256 	/*
5257 	 * Determine the number of objects per slab
5258 	 */
5259 	s->oo = oo_make(order, size);
5260 	s->min = oo_make(get_order(size), size);
5261 
5262 	return !!oo_objects(s->oo);
5263 }
5264 
5265 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
5266 {
5267 	s->flags = kmem_cache_flags(flags, s->name);
5268 #ifdef CONFIG_SLAB_FREELIST_HARDENED
5269 	s->random = get_random_long();
5270 #endif
5271 
5272 	if (!calculate_sizes(s))
5273 		goto error;
5274 	if (disable_higher_order_debug) {
5275 		/*
5276 		 * Disable debugging flags that store metadata if the min slab
5277 		 * order increased.
5278 		 */
5279 		if (get_order(s->size) > get_order(s->object_size)) {
5280 			s->flags &= ~DEBUG_METADATA_FLAGS;
5281 			s->offset = 0;
5282 			if (!calculate_sizes(s))
5283 				goto error;
5284 		}
5285 	}
5286 
5287 #ifdef system_has_freelist_aba
5288 	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
5289 		/* Enable fast mode */
5290 		s->flags |= __CMPXCHG_DOUBLE;
5291 	}
5292 #endif
5293 
5294 	/*
5295 	 * The larger the object size is, the more slabs we want on the partial
5296 	 * list to avoid pounding the page allocator excessively.
5297 	 */
5298 	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
5299 	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
5300 
5301 	set_cpu_partial(s);
5302 
5303 #ifdef CONFIG_NUMA
5304 	s->remote_node_defrag_ratio = 1000;
5305 #endif
5306 
5307 	/* Initialize the pre-computed randomized freelist if slab is up */
5308 	if (slab_state >= UP) {
5309 		if (init_cache_random_seq(s))
5310 			goto error;
5311 	}
5312 
5313 	if (!init_kmem_cache_nodes(s))
5314 		goto error;
5315 
5316 	if (alloc_kmem_cache_cpus(s))
5317 		return 0;
5318 
5319 error:
5320 	__kmem_cache_release(s);
5321 	return -EINVAL;
5322 }
5323 
5324 static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
5325 			      const char *text)
5326 {
5327 #ifdef CONFIG_SLUB_DEBUG
5328 	void *addr = slab_address(slab);
5329 	void *p;
5330 
5331 	slab_err(s, slab, text, s->name);
5332 
5333 	spin_lock(&object_map_lock);
5334 	__fill_map(object_map, s, slab);
5335 
5336 	for_each_object(p, s, addr, slab->objects) {
5337 
5338 		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
5339 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
5340 			print_tracking(s, p);
5341 		}
5342 	}
5343 	spin_unlock(&object_map_lock);
5344 #endif
5345 }
5346 
5347 /*
5348  * Attempt to free all partial slabs on a node.
5349  * This is called from __kmem_cache_shutdown(). We must take list_lock
5350  * because sysfs file might still access partial list after the shutdowning.
5351  */
5352 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
5353 {
5354 	LIST_HEAD(discard);
5355 	struct slab *slab, *h;
5356 
5357 	BUG_ON(irqs_disabled());
5358 	spin_lock_irq(&n->list_lock);
5359 	list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
5360 		if (!slab->inuse) {
5361 			remove_partial(n, slab);
5362 			list_add(&slab->slab_list, &discard);
5363 		} else {
5364 			list_slab_objects(s, slab,
5365 			  "Objects remaining in %s on __kmem_cache_shutdown()");
5366 		}
5367 	}
5368 	spin_unlock_irq(&n->list_lock);
5369 
5370 	list_for_each_entry_safe(slab, h, &discard, slab_list)
5371 		discard_slab(s, slab);
5372 }
5373 
5374 bool __kmem_cache_empty(struct kmem_cache *s)
5375 {
5376 	int node;
5377 	struct kmem_cache_node *n;
5378 
5379 	for_each_kmem_cache_node(s, node, n)
5380 		if (n->nr_partial || node_nr_slabs(n))
5381 			return false;
5382 	return true;
5383 }
5384 
5385 /*
5386  * Release all resources used by a slab cache.
5387  */
5388 int __kmem_cache_shutdown(struct kmem_cache *s)
5389 {
5390 	int node;
5391 	struct kmem_cache_node *n;
5392 
5393 	flush_all_cpus_locked(s);
5394 	/* Attempt to free all objects */
5395 	for_each_kmem_cache_node(s, node, n) {
5396 		free_partial(s, n);
5397 		if (n->nr_partial || node_nr_slabs(n))
5398 			return 1;
5399 	}
5400 	return 0;
5401 }
5402 
5403 #ifdef CONFIG_PRINTK
5404 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
5405 {
5406 	void *base;
5407 	int __maybe_unused i;
5408 	unsigned int objnr;
5409 	void *objp;
5410 	void *objp0;
5411 	struct kmem_cache *s = slab->slab_cache;
5412 	struct track __maybe_unused *trackp;
5413 
5414 	kpp->kp_ptr = object;
5415 	kpp->kp_slab = slab;
5416 	kpp->kp_slab_cache = s;
5417 	base = slab_address(slab);
5418 	objp0 = kasan_reset_tag(object);
5419 #ifdef CONFIG_SLUB_DEBUG
5420 	objp = restore_red_left(s, objp0);
5421 #else
5422 	objp = objp0;
5423 #endif
5424 	objnr = obj_to_index(s, slab, objp);
5425 	kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
5426 	objp = base + s->size * objnr;
5427 	kpp->kp_objp = objp;
5428 	if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
5429 			 || (objp - base) % s->size) ||
5430 	    !(s->flags & SLAB_STORE_USER))
5431 		return;
5432 #ifdef CONFIG_SLUB_DEBUG
5433 	objp = fixup_red_left(s, objp);
5434 	trackp = get_track(s, objp, TRACK_ALLOC);
5435 	kpp->kp_ret = (void *)trackp->addr;
5436 #ifdef CONFIG_STACKDEPOT
5437 	{
5438 		depot_stack_handle_t handle;
5439 		unsigned long *entries;
5440 		unsigned int nr_entries;
5441 
5442 		handle = READ_ONCE(trackp->handle);
5443 		if (handle) {
5444 			nr_entries = stack_depot_fetch(handle, &entries);
5445 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5446 				kpp->kp_stack[i] = (void *)entries[i];
5447 		}
5448 
5449 		trackp = get_track(s, objp, TRACK_FREE);
5450 		handle = READ_ONCE(trackp->handle);
5451 		if (handle) {
5452 			nr_entries = stack_depot_fetch(handle, &entries);
5453 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5454 				kpp->kp_free_stack[i] = (void *)entries[i];
5455 		}
5456 	}
5457 #endif
5458 #endif
5459 }
5460 #endif
5461 
5462 /********************************************************************
5463  *		Kmalloc subsystem
5464  *******************************************************************/
5465 
5466 static int __init setup_slub_min_order(char *str)
5467 {
5468 	get_option(&str, (int *)&slub_min_order);
5469 
5470 	if (slub_min_order > slub_max_order)
5471 		slub_max_order = slub_min_order;
5472 
5473 	return 1;
5474 }
5475 
5476 __setup("slab_min_order=", setup_slub_min_order);
5477 __setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0);
5478 
5479 
5480 static int __init setup_slub_max_order(char *str)
5481 {
5482 	get_option(&str, (int *)&slub_max_order);
5483 	slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
5484 
5485 	if (slub_min_order > slub_max_order)
5486 		slub_min_order = slub_max_order;
5487 
5488 	return 1;
5489 }
5490 
5491 __setup("slab_max_order=", setup_slub_max_order);
5492 __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0);
5493 
5494 static int __init setup_slub_min_objects(char *str)
5495 {
5496 	get_option(&str, (int *)&slub_min_objects);
5497 
5498 	return 1;
5499 }
5500 
5501 __setup("slab_min_objects=", setup_slub_min_objects);
5502 __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
5503 
5504 #ifdef CONFIG_HARDENED_USERCOPY
5505 /*
5506  * Rejects incorrectly sized objects and objects that are to be copied
5507  * to/from userspace but do not fall entirely within the containing slab
5508  * cache's usercopy region.
5509  *
5510  * Returns NULL if check passes, otherwise const char * to name of cache
5511  * to indicate an error.
5512  */
5513 void __check_heap_object(const void *ptr, unsigned long n,
5514 			 const struct slab *slab, bool to_user)
5515 {
5516 	struct kmem_cache *s;
5517 	unsigned int offset;
5518 	bool is_kfence = is_kfence_address(ptr);
5519 
5520 	ptr = kasan_reset_tag(ptr);
5521 
5522 	/* Find object and usable object size. */
5523 	s = slab->slab_cache;
5524 
5525 	/* Reject impossible pointers. */
5526 	if (ptr < slab_address(slab))
5527 		usercopy_abort("SLUB object not in SLUB page?!", NULL,
5528 			       to_user, 0, n);
5529 
5530 	/* Find offset within object. */
5531 	if (is_kfence)
5532 		offset = ptr - kfence_object_start(ptr);
5533 	else
5534 		offset = (ptr - slab_address(slab)) % s->size;
5535 
5536 	/* Adjust for redzone and reject if within the redzone. */
5537 	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
5538 		if (offset < s->red_left_pad)
5539 			usercopy_abort("SLUB object in left red zone",
5540 				       s->name, to_user, offset, n);
5541 		offset -= s->red_left_pad;
5542 	}
5543 
5544 	/* Allow address range falling entirely within usercopy region. */
5545 	if (offset >= s->useroffset &&
5546 	    offset - s->useroffset <= s->usersize &&
5547 	    n <= s->useroffset - offset + s->usersize)
5548 		return;
5549 
5550 	usercopy_abort("SLUB object", s->name, to_user, offset, n);
5551 }
5552 #endif /* CONFIG_HARDENED_USERCOPY */
5553 
5554 #define SHRINK_PROMOTE_MAX 32
5555 
5556 /*
5557  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
5558  * up most to the head of the partial lists. New allocations will then
5559  * fill those up and thus they can be removed from the partial lists.
5560  *
5561  * The slabs with the least items are placed last. This results in them
5562  * being allocated from last increasing the chance that the last objects
5563  * are freed in them.
5564  */
5565 static int __kmem_cache_do_shrink(struct kmem_cache *s)
5566 {
5567 	int node;
5568 	int i;
5569 	struct kmem_cache_node *n;
5570 	struct slab *slab;
5571 	struct slab *t;
5572 	struct list_head discard;
5573 	struct list_head promote[SHRINK_PROMOTE_MAX];
5574 	unsigned long flags;
5575 	int ret = 0;
5576 
5577 	for_each_kmem_cache_node(s, node, n) {
5578 		INIT_LIST_HEAD(&discard);
5579 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
5580 			INIT_LIST_HEAD(promote + i);
5581 
5582 		spin_lock_irqsave(&n->list_lock, flags);
5583 
5584 		/*
5585 		 * Build lists of slabs to discard or promote.
5586 		 *
5587 		 * Note that concurrent frees may occur while we hold the
5588 		 * list_lock. slab->inuse here is the upper limit.
5589 		 */
5590 		list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
5591 			int free = slab->objects - slab->inuse;
5592 
5593 			/* Do not reread slab->inuse */
5594 			barrier();
5595 
5596 			/* We do not keep full slabs on the list */
5597 			BUG_ON(free <= 0);
5598 
5599 			if (free == slab->objects) {
5600 				list_move(&slab->slab_list, &discard);
5601 				slab_clear_node_partial(slab);
5602 				n->nr_partial--;
5603 				dec_slabs_node(s, node, slab->objects);
5604 			} else if (free <= SHRINK_PROMOTE_MAX)
5605 				list_move(&slab->slab_list, promote + free - 1);
5606 		}
5607 
5608 		/*
5609 		 * Promote the slabs filled up most to the head of the
5610 		 * partial list.
5611 		 */
5612 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
5613 			list_splice(promote + i, &n->partial);
5614 
5615 		spin_unlock_irqrestore(&n->list_lock, flags);
5616 
5617 		/* Release empty slabs */
5618 		list_for_each_entry_safe(slab, t, &discard, slab_list)
5619 			free_slab(s, slab);
5620 
5621 		if (node_nr_slabs(n))
5622 			ret = 1;
5623 	}
5624 
5625 	return ret;
5626 }
5627 
5628 int __kmem_cache_shrink(struct kmem_cache *s)
5629 {
5630 	flush_all(s);
5631 	return __kmem_cache_do_shrink(s);
5632 }
5633 
5634 static int slab_mem_going_offline_callback(void *arg)
5635 {
5636 	struct kmem_cache *s;
5637 
5638 	mutex_lock(&slab_mutex);
5639 	list_for_each_entry(s, &slab_caches, list) {
5640 		flush_all_cpus_locked(s);
5641 		__kmem_cache_do_shrink(s);
5642 	}
5643 	mutex_unlock(&slab_mutex);
5644 
5645 	return 0;
5646 }
5647 
5648 static void slab_mem_offline_callback(void *arg)
5649 {
5650 	struct memory_notify *marg = arg;
5651 	int offline_node;
5652 
5653 	offline_node = marg->status_change_nid_normal;
5654 
5655 	/*
5656 	 * If the node still has available memory. we need kmem_cache_node
5657 	 * for it yet.
5658 	 */
5659 	if (offline_node < 0)
5660 		return;
5661 
5662 	mutex_lock(&slab_mutex);
5663 	node_clear(offline_node, slab_nodes);
5664 	/*
5665 	 * We no longer free kmem_cache_node structures here, as it would be
5666 	 * racy with all get_node() users, and infeasible to protect them with
5667 	 * slab_mutex.
5668 	 */
5669 	mutex_unlock(&slab_mutex);
5670 }
5671 
5672 static int slab_mem_going_online_callback(void *arg)
5673 {
5674 	struct kmem_cache_node *n;
5675 	struct kmem_cache *s;
5676 	struct memory_notify *marg = arg;
5677 	int nid = marg->status_change_nid_normal;
5678 	int ret = 0;
5679 
5680 	/*
5681 	 * If the node's memory is already available, then kmem_cache_node is
5682 	 * already created. Nothing to do.
5683 	 */
5684 	if (nid < 0)
5685 		return 0;
5686 
5687 	/*
5688 	 * We are bringing a node online. No memory is available yet. We must
5689 	 * allocate a kmem_cache_node structure in order to bring the node
5690 	 * online.
5691 	 */
5692 	mutex_lock(&slab_mutex);
5693 	list_for_each_entry(s, &slab_caches, list) {
5694 		/*
5695 		 * The structure may already exist if the node was previously
5696 		 * onlined and offlined.
5697 		 */
5698 		if (get_node(s, nid))
5699 			continue;
5700 		/*
5701 		 * XXX: kmem_cache_alloc_node will fallback to other nodes
5702 		 *      since memory is not yet available from the node that
5703 		 *      is brought up.
5704 		 */
5705 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
5706 		if (!n) {
5707 			ret = -ENOMEM;
5708 			goto out;
5709 		}
5710 		init_kmem_cache_node(n);
5711 		s->node[nid] = n;
5712 	}
5713 	/*
5714 	 * Any cache created after this point will also have kmem_cache_node
5715 	 * initialized for the new node.
5716 	 */
5717 	node_set(nid, slab_nodes);
5718 out:
5719 	mutex_unlock(&slab_mutex);
5720 	return ret;
5721 }
5722 
5723 static int slab_memory_callback(struct notifier_block *self,
5724 				unsigned long action, void *arg)
5725 {
5726 	int ret = 0;
5727 
5728 	switch (action) {
5729 	case MEM_GOING_ONLINE:
5730 		ret = slab_mem_going_online_callback(arg);
5731 		break;
5732 	case MEM_GOING_OFFLINE:
5733 		ret = slab_mem_going_offline_callback(arg);
5734 		break;
5735 	case MEM_OFFLINE:
5736 	case MEM_CANCEL_ONLINE:
5737 		slab_mem_offline_callback(arg);
5738 		break;
5739 	case MEM_ONLINE:
5740 	case MEM_CANCEL_OFFLINE:
5741 		break;
5742 	}
5743 	if (ret)
5744 		ret = notifier_from_errno(ret);
5745 	else
5746 		ret = NOTIFY_OK;
5747 	return ret;
5748 }
5749 
5750 /********************************************************************
5751  *			Basic setup of slabs
5752  *******************************************************************/
5753 
5754 /*
5755  * Used for early kmem_cache structures that were allocated using
5756  * the page allocator. Allocate them properly then fix up the pointers
5757  * that may be pointing to the wrong kmem_cache structure.
5758  */
5759 
5760 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
5761 {
5762 	int node;
5763 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
5764 	struct kmem_cache_node *n;
5765 
5766 	memcpy(s, static_cache, kmem_cache->object_size);
5767 
5768 	/*
5769 	 * This runs very early, and only the boot processor is supposed to be
5770 	 * up.  Even if it weren't true, IRQs are not up so we couldn't fire
5771 	 * IPIs around.
5772 	 */
5773 	__flush_cpu_slab(s, smp_processor_id());
5774 	for_each_kmem_cache_node(s, node, n) {
5775 		struct slab *p;
5776 
5777 		list_for_each_entry(p, &n->partial, slab_list)
5778 			p->slab_cache = s;
5779 
5780 #ifdef CONFIG_SLUB_DEBUG
5781 		list_for_each_entry(p, &n->full, slab_list)
5782 			p->slab_cache = s;
5783 #endif
5784 	}
5785 	list_add(&s->list, &slab_caches);
5786 	return s;
5787 }
5788 
5789 void __init kmem_cache_init(void)
5790 {
5791 	static __initdata struct kmem_cache boot_kmem_cache,
5792 		boot_kmem_cache_node;
5793 	int node;
5794 
5795 	if (debug_guardpage_minorder())
5796 		slub_max_order = 0;
5797 
5798 	/* Print slub debugging pointers without hashing */
5799 	if (__slub_debug_enabled())
5800 		no_hash_pointers_enable(NULL);
5801 
5802 	kmem_cache_node = &boot_kmem_cache_node;
5803 	kmem_cache = &boot_kmem_cache;
5804 
5805 	/*
5806 	 * Initialize the nodemask for which we will allocate per node
5807 	 * structures. Here we don't need taking slab_mutex yet.
5808 	 */
5809 	for_each_node_state(node, N_NORMAL_MEMORY)
5810 		node_set(node, slab_nodes);
5811 
5812 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
5813 			sizeof(struct kmem_cache_node),
5814 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
5815 
5816 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
5817 
5818 	/* Able to allocate the per node structures */
5819 	slab_state = PARTIAL;
5820 
5821 	create_boot_cache(kmem_cache, "kmem_cache",
5822 			offsetof(struct kmem_cache, node) +
5823 				nr_node_ids * sizeof(struct kmem_cache_node *),
5824 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
5825 
5826 	kmem_cache = bootstrap(&boot_kmem_cache);
5827 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
5828 
5829 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
5830 	setup_kmalloc_cache_index_table();
5831 	create_kmalloc_caches();
5832 
5833 	/* Setup random freelists for each cache */
5834 	init_freelist_randomization();
5835 
5836 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
5837 				  slub_cpu_dead);
5838 
5839 	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
5840 		cache_line_size(),
5841 		slub_min_order, slub_max_order, slub_min_objects,
5842 		nr_cpu_ids, nr_node_ids);
5843 }
5844 
5845 void __init kmem_cache_init_late(void)
5846 {
5847 #ifndef CONFIG_SLUB_TINY
5848 	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
5849 	WARN_ON(!flushwq);
5850 #endif
5851 }
5852 
5853 struct kmem_cache *
5854 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
5855 		   slab_flags_t flags, void (*ctor)(void *))
5856 {
5857 	struct kmem_cache *s;
5858 
5859 	s = find_mergeable(size, align, flags, name, ctor);
5860 	if (s) {
5861 		if (sysfs_slab_alias(s, name))
5862 			return NULL;
5863 
5864 		s->refcount++;
5865 
5866 		/*
5867 		 * Adjust the object sizes so that we clear
5868 		 * the complete object on kzalloc.
5869 		 */
5870 		s->object_size = max(s->object_size, size);
5871 		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
5872 	}
5873 
5874 	return s;
5875 }
5876 
5877 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
5878 {
5879 	int err;
5880 
5881 	err = kmem_cache_open(s, flags);
5882 	if (err)
5883 		return err;
5884 
5885 	/* Mutex is not taken during early boot */
5886 	if (slab_state <= UP)
5887 		return 0;
5888 
5889 	err = sysfs_slab_add(s);
5890 	if (err) {
5891 		__kmem_cache_release(s);
5892 		return err;
5893 	}
5894 
5895 	if (s->flags & SLAB_STORE_USER)
5896 		debugfs_slab_add(s);
5897 
5898 	return 0;
5899 }
5900 
5901 #ifdef SLAB_SUPPORTS_SYSFS
5902 static int count_inuse(struct slab *slab)
5903 {
5904 	return slab->inuse;
5905 }
5906 
5907 static int count_total(struct slab *slab)
5908 {
5909 	return slab->objects;
5910 }
5911 #endif
5912 
5913 #ifdef CONFIG_SLUB_DEBUG
5914 static void validate_slab(struct kmem_cache *s, struct slab *slab,
5915 			  unsigned long *obj_map)
5916 {
5917 	void *p;
5918 	void *addr = slab_address(slab);
5919 
5920 	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
5921 		return;
5922 
5923 	/* Now we know that a valid freelist exists */
5924 	__fill_map(obj_map, s, slab);
5925 	for_each_object(p, s, addr, slab->objects) {
5926 		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
5927 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
5928 
5929 		if (!check_object(s, slab, p, val))
5930 			break;
5931 	}
5932 }
5933 
5934 static int validate_slab_node(struct kmem_cache *s,
5935 		struct kmem_cache_node *n, unsigned long *obj_map)
5936 {
5937 	unsigned long count = 0;
5938 	struct slab *slab;
5939 	unsigned long flags;
5940 
5941 	spin_lock_irqsave(&n->list_lock, flags);
5942 
5943 	list_for_each_entry(slab, &n->partial, slab_list) {
5944 		validate_slab(s, slab, obj_map);
5945 		count++;
5946 	}
5947 	if (count != n->nr_partial) {
5948 		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
5949 		       s->name, count, n->nr_partial);
5950 		slab_add_kunit_errors();
5951 	}
5952 
5953 	if (!(s->flags & SLAB_STORE_USER))
5954 		goto out;
5955 
5956 	list_for_each_entry(slab, &n->full, slab_list) {
5957 		validate_slab(s, slab, obj_map);
5958 		count++;
5959 	}
5960 	if (count != node_nr_slabs(n)) {
5961 		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
5962 		       s->name, count, node_nr_slabs(n));
5963 		slab_add_kunit_errors();
5964 	}
5965 
5966 out:
5967 	spin_unlock_irqrestore(&n->list_lock, flags);
5968 	return count;
5969 }
5970 
5971 long validate_slab_cache(struct kmem_cache *s)
5972 {
5973 	int node;
5974 	unsigned long count = 0;
5975 	struct kmem_cache_node *n;
5976 	unsigned long *obj_map;
5977 
5978 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
5979 	if (!obj_map)
5980 		return -ENOMEM;
5981 
5982 	flush_all(s);
5983 	for_each_kmem_cache_node(s, node, n)
5984 		count += validate_slab_node(s, n, obj_map);
5985 
5986 	bitmap_free(obj_map);
5987 
5988 	return count;
5989 }
5990 EXPORT_SYMBOL(validate_slab_cache);
5991 
5992 #ifdef CONFIG_DEBUG_FS
5993 /*
5994  * Generate lists of code addresses where slabcache objects are allocated
5995  * and freed.
5996  */
5997 
5998 struct location {
5999 	depot_stack_handle_t handle;
6000 	unsigned long count;
6001 	unsigned long addr;
6002 	unsigned long waste;
6003 	long long sum_time;
6004 	long min_time;
6005 	long max_time;
6006 	long min_pid;
6007 	long max_pid;
6008 	DECLARE_BITMAP(cpus, NR_CPUS);
6009 	nodemask_t nodes;
6010 };
6011 
6012 struct loc_track {
6013 	unsigned long max;
6014 	unsigned long count;
6015 	struct location *loc;
6016 	loff_t idx;
6017 };
6018 
6019 static struct dentry *slab_debugfs_root;
6020 
6021 static void free_loc_track(struct loc_track *t)
6022 {
6023 	if (t->max)
6024 		free_pages((unsigned long)t->loc,
6025 			get_order(sizeof(struct location) * t->max));
6026 }
6027 
6028 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
6029 {
6030 	struct location *l;
6031 	int order;
6032 
6033 	order = get_order(sizeof(struct location) * max);
6034 
6035 	l = (void *)__get_free_pages(flags, order);
6036 	if (!l)
6037 		return 0;
6038 
6039 	if (t->count) {
6040 		memcpy(l, t->loc, sizeof(struct location) * t->count);
6041 		free_loc_track(t);
6042 	}
6043 	t->max = max;
6044 	t->loc = l;
6045 	return 1;
6046 }
6047 
6048 static int add_location(struct loc_track *t, struct kmem_cache *s,
6049 				const struct track *track,
6050 				unsigned int orig_size)
6051 {
6052 	long start, end, pos;
6053 	struct location *l;
6054 	unsigned long caddr, chandle, cwaste;
6055 	unsigned long age = jiffies - track->when;
6056 	depot_stack_handle_t handle = 0;
6057 	unsigned int waste = s->object_size - orig_size;
6058 
6059 #ifdef CONFIG_STACKDEPOT
6060 	handle = READ_ONCE(track->handle);
6061 #endif
6062 	start = -1;
6063 	end = t->count;
6064 
6065 	for ( ; ; ) {
6066 		pos = start + (end - start + 1) / 2;
6067 
6068 		/*
6069 		 * There is nothing at "end". If we end up there
6070 		 * we need to add something to before end.
6071 		 */
6072 		if (pos == end)
6073 			break;
6074 
6075 		l = &t->loc[pos];
6076 		caddr = l->addr;
6077 		chandle = l->handle;
6078 		cwaste = l->waste;
6079 		if ((track->addr == caddr) && (handle == chandle) &&
6080 			(waste == cwaste)) {
6081 
6082 			l->count++;
6083 			if (track->when) {
6084 				l->sum_time += age;
6085 				if (age < l->min_time)
6086 					l->min_time = age;
6087 				if (age > l->max_time)
6088 					l->max_time = age;
6089 
6090 				if (track->pid < l->min_pid)
6091 					l->min_pid = track->pid;
6092 				if (track->pid > l->max_pid)
6093 					l->max_pid = track->pid;
6094 
6095 				cpumask_set_cpu(track->cpu,
6096 						to_cpumask(l->cpus));
6097 			}
6098 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
6099 			return 1;
6100 		}
6101 
6102 		if (track->addr < caddr)
6103 			end = pos;
6104 		else if (track->addr == caddr && handle < chandle)
6105 			end = pos;
6106 		else if (track->addr == caddr && handle == chandle &&
6107 				waste < cwaste)
6108 			end = pos;
6109 		else
6110 			start = pos;
6111 	}
6112 
6113 	/*
6114 	 * Not found. Insert new tracking element.
6115 	 */
6116 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
6117 		return 0;
6118 
6119 	l = t->loc + pos;
6120 	if (pos < t->count)
6121 		memmove(l + 1, l,
6122 			(t->count - pos) * sizeof(struct location));
6123 	t->count++;
6124 	l->count = 1;
6125 	l->addr = track->addr;
6126 	l->sum_time = age;
6127 	l->min_time = age;
6128 	l->max_time = age;
6129 	l->min_pid = track->pid;
6130 	l->max_pid = track->pid;
6131 	l->handle = handle;
6132 	l->waste = waste;
6133 	cpumask_clear(to_cpumask(l->cpus));
6134 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
6135 	nodes_clear(l->nodes);
6136 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
6137 	return 1;
6138 }
6139 
6140 static void process_slab(struct loc_track *t, struct kmem_cache *s,
6141 		struct slab *slab, enum track_item alloc,
6142 		unsigned long *obj_map)
6143 {
6144 	void *addr = slab_address(slab);
6145 	bool is_alloc = (alloc == TRACK_ALLOC);
6146 	void *p;
6147 
6148 	__fill_map(obj_map, s, slab);
6149 
6150 	for_each_object(p, s, addr, slab->objects)
6151 		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
6152 			add_location(t, s, get_track(s, p, alloc),
6153 				     is_alloc ? get_orig_size(s, p) :
6154 						s->object_size);
6155 }
6156 #endif  /* CONFIG_DEBUG_FS   */
6157 #endif	/* CONFIG_SLUB_DEBUG */
6158 
6159 #ifdef SLAB_SUPPORTS_SYSFS
6160 enum slab_stat_type {
6161 	SL_ALL,			/* All slabs */
6162 	SL_PARTIAL,		/* Only partially allocated slabs */
6163 	SL_CPU,			/* Only slabs used for cpu caches */
6164 	SL_OBJECTS,		/* Determine allocated objects not slabs */
6165 	SL_TOTAL		/* Determine object capacity not slabs */
6166 };
6167 
6168 #define SO_ALL		(1 << SL_ALL)
6169 #define SO_PARTIAL	(1 << SL_PARTIAL)
6170 #define SO_CPU		(1 << SL_CPU)
6171 #define SO_OBJECTS	(1 << SL_OBJECTS)
6172 #define SO_TOTAL	(1 << SL_TOTAL)
6173 
6174 static ssize_t show_slab_objects(struct kmem_cache *s,
6175 				 char *buf, unsigned long flags)
6176 {
6177 	unsigned long total = 0;
6178 	int node;
6179 	int x;
6180 	unsigned long *nodes;
6181 	int len = 0;
6182 
6183 	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
6184 	if (!nodes)
6185 		return -ENOMEM;
6186 
6187 	if (flags & SO_CPU) {
6188 		int cpu;
6189 
6190 		for_each_possible_cpu(cpu) {
6191 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
6192 							       cpu);
6193 			int node;
6194 			struct slab *slab;
6195 
6196 			slab = READ_ONCE(c->slab);
6197 			if (!slab)
6198 				continue;
6199 
6200 			node = slab_nid(slab);
6201 			if (flags & SO_TOTAL)
6202 				x = slab->objects;
6203 			else if (flags & SO_OBJECTS)
6204 				x = slab->inuse;
6205 			else
6206 				x = 1;
6207 
6208 			total += x;
6209 			nodes[node] += x;
6210 
6211 #ifdef CONFIG_SLUB_CPU_PARTIAL
6212 			slab = slub_percpu_partial_read_once(c);
6213 			if (slab) {
6214 				node = slab_nid(slab);
6215 				if (flags & SO_TOTAL)
6216 					WARN_ON_ONCE(1);
6217 				else if (flags & SO_OBJECTS)
6218 					WARN_ON_ONCE(1);
6219 				else
6220 					x = data_race(slab->slabs);
6221 				total += x;
6222 				nodes[node] += x;
6223 			}
6224 #endif
6225 		}
6226 	}
6227 
6228 	/*
6229 	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
6230 	 * already held which will conflict with an existing lock order:
6231 	 *
6232 	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
6233 	 *
6234 	 * We don't really need mem_hotplug_lock (to hold off
6235 	 * slab_mem_going_offline_callback) here because slab's memory hot
6236 	 * unplug code doesn't destroy the kmem_cache->node[] data.
6237 	 */
6238 
6239 #ifdef CONFIG_SLUB_DEBUG
6240 	if (flags & SO_ALL) {
6241 		struct kmem_cache_node *n;
6242 
6243 		for_each_kmem_cache_node(s, node, n) {
6244 
6245 			if (flags & SO_TOTAL)
6246 				x = node_nr_objs(n);
6247 			else if (flags & SO_OBJECTS)
6248 				x = node_nr_objs(n) - count_partial(n, count_free);
6249 			else
6250 				x = node_nr_slabs(n);
6251 			total += x;
6252 			nodes[node] += x;
6253 		}
6254 
6255 	} else
6256 #endif
6257 	if (flags & SO_PARTIAL) {
6258 		struct kmem_cache_node *n;
6259 
6260 		for_each_kmem_cache_node(s, node, n) {
6261 			if (flags & SO_TOTAL)
6262 				x = count_partial(n, count_total);
6263 			else if (flags & SO_OBJECTS)
6264 				x = count_partial(n, count_inuse);
6265 			else
6266 				x = n->nr_partial;
6267 			total += x;
6268 			nodes[node] += x;
6269 		}
6270 	}
6271 
6272 	len += sysfs_emit_at(buf, len, "%lu", total);
6273 #ifdef CONFIG_NUMA
6274 	for (node = 0; node < nr_node_ids; node++) {
6275 		if (nodes[node])
6276 			len += sysfs_emit_at(buf, len, " N%d=%lu",
6277 					     node, nodes[node]);
6278 	}
6279 #endif
6280 	len += sysfs_emit_at(buf, len, "\n");
6281 	kfree(nodes);
6282 
6283 	return len;
6284 }
6285 
6286 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
6287 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
6288 
6289 struct slab_attribute {
6290 	struct attribute attr;
6291 	ssize_t (*show)(struct kmem_cache *s, char *buf);
6292 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
6293 };
6294 
6295 #define SLAB_ATTR_RO(_name) \
6296 	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
6297 
6298 #define SLAB_ATTR(_name) \
6299 	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
6300 
6301 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
6302 {
6303 	return sysfs_emit(buf, "%u\n", s->size);
6304 }
6305 SLAB_ATTR_RO(slab_size);
6306 
6307 static ssize_t align_show(struct kmem_cache *s, char *buf)
6308 {
6309 	return sysfs_emit(buf, "%u\n", s->align);
6310 }
6311 SLAB_ATTR_RO(align);
6312 
6313 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
6314 {
6315 	return sysfs_emit(buf, "%u\n", s->object_size);
6316 }
6317 SLAB_ATTR_RO(object_size);
6318 
6319 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
6320 {
6321 	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
6322 }
6323 SLAB_ATTR_RO(objs_per_slab);
6324 
6325 static ssize_t order_show(struct kmem_cache *s, char *buf)
6326 {
6327 	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
6328 }
6329 SLAB_ATTR_RO(order);
6330 
6331 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
6332 {
6333 	return sysfs_emit(buf, "%lu\n", s->min_partial);
6334 }
6335 
6336 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
6337 				 size_t length)
6338 {
6339 	unsigned long min;
6340 	int err;
6341 
6342 	err = kstrtoul(buf, 10, &min);
6343 	if (err)
6344 		return err;
6345 
6346 	s->min_partial = min;
6347 	return length;
6348 }
6349 SLAB_ATTR(min_partial);
6350 
6351 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
6352 {
6353 	unsigned int nr_partial = 0;
6354 #ifdef CONFIG_SLUB_CPU_PARTIAL
6355 	nr_partial = s->cpu_partial;
6356 #endif
6357 
6358 	return sysfs_emit(buf, "%u\n", nr_partial);
6359 }
6360 
6361 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
6362 				 size_t length)
6363 {
6364 	unsigned int objects;
6365 	int err;
6366 
6367 	err = kstrtouint(buf, 10, &objects);
6368 	if (err)
6369 		return err;
6370 	if (objects && !kmem_cache_has_cpu_partial(s))
6371 		return -EINVAL;
6372 
6373 	slub_set_cpu_partial(s, objects);
6374 	flush_all(s);
6375 	return length;
6376 }
6377 SLAB_ATTR(cpu_partial);
6378 
6379 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
6380 {
6381 	if (!s->ctor)
6382 		return 0;
6383 	return sysfs_emit(buf, "%pS\n", s->ctor);
6384 }
6385 SLAB_ATTR_RO(ctor);
6386 
6387 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
6388 {
6389 	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
6390 }
6391 SLAB_ATTR_RO(aliases);
6392 
6393 static ssize_t partial_show(struct kmem_cache *s, char *buf)
6394 {
6395 	return show_slab_objects(s, buf, SO_PARTIAL);
6396 }
6397 SLAB_ATTR_RO(partial);
6398 
6399 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
6400 {
6401 	return show_slab_objects(s, buf, SO_CPU);
6402 }
6403 SLAB_ATTR_RO(cpu_slabs);
6404 
6405 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
6406 {
6407 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
6408 }
6409 SLAB_ATTR_RO(objects_partial);
6410 
6411 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
6412 {
6413 	int objects = 0;
6414 	int slabs = 0;
6415 	int cpu __maybe_unused;
6416 	int len = 0;
6417 
6418 #ifdef CONFIG_SLUB_CPU_PARTIAL
6419 	for_each_online_cpu(cpu) {
6420 		struct slab *slab;
6421 
6422 		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6423 
6424 		if (slab)
6425 			slabs += data_race(slab->slabs);
6426 	}
6427 #endif
6428 
6429 	/* Approximate half-full slabs, see slub_set_cpu_partial() */
6430 	objects = (slabs * oo_objects(s->oo)) / 2;
6431 	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
6432 
6433 #ifdef CONFIG_SLUB_CPU_PARTIAL
6434 	for_each_online_cpu(cpu) {
6435 		struct slab *slab;
6436 
6437 		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6438 		if (slab) {
6439 			slabs = data_race(slab->slabs);
6440 			objects = (slabs * oo_objects(s->oo)) / 2;
6441 			len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
6442 					     cpu, objects, slabs);
6443 		}
6444 	}
6445 #endif
6446 	len += sysfs_emit_at(buf, len, "\n");
6447 
6448 	return len;
6449 }
6450 SLAB_ATTR_RO(slabs_cpu_partial);
6451 
6452 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
6453 {
6454 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
6455 }
6456 SLAB_ATTR_RO(reclaim_account);
6457 
6458 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
6459 {
6460 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
6461 }
6462 SLAB_ATTR_RO(hwcache_align);
6463 
6464 #ifdef CONFIG_ZONE_DMA
6465 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
6466 {
6467 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
6468 }
6469 SLAB_ATTR_RO(cache_dma);
6470 #endif
6471 
6472 #ifdef CONFIG_HARDENED_USERCOPY
6473 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
6474 {
6475 	return sysfs_emit(buf, "%u\n", s->usersize);
6476 }
6477 SLAB_ATTR_RO(usersize);
6478 #endif
6479 
6480 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
6481 {
6482 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
6483 }
6484 SLAB_ATTR_RO(destroy_by_rcu);
6485 
6486 #ifdef CONFIG_SLUB_DEBUG
6487 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
6488 {
6489 	return show_slab_objects(s, buf, SO_ALL);
6490 }
6491 SLAB_ATTR_RO(slabs);
6492 
6493 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
6494 {
6495 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
6496 }
6497 SLAB_ATTR_RO(total_objects);
6498 
6499 static ssize_t objects_show(struct kmem_cache *s, char *buf)
6500 {
6501 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
6502 }
6503 SLAB_ATTR_RO(objects);
6504 
6505 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
6506 {
6507 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
6508 }
6509 SLAB_ATTR_RO(sanity_checks);
6510 
6511 static ssize_t trace_show(struct kmem_cache *s, char *buf)
6512 {
6513 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
6514 }
6515 SLAB_ATTR_RO(trace);
6516 
6517 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
6518 {
6519 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
6520 }
6521 
6522 SLAB_ATTR_RO(red_zone);
6523 
6524 static ssize_t poison_show(struct kmem_cache *s, char *buf)
6525 {
6526 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
6527 }
6528 
6529 SLAB_ATTR_RO(poison);
6530 
6531 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
6532 {
6533 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
6534 }
6535 
6536 SLAB_ATTR_RO(store_user);
6537 
6538 static ssize_t validate_show(struct kmem_cache *s, char *buf)
6539 {
6540 	return 0;
6541 }
6542 
6543 static ssize_t validate_store(struct kmem_cache *s,
6544 			const char *buf, size_t length)
6545 {
6546 	int ret = -EINVAL;
6547 
6548 	if (buf[0] == '1' && kmem_cache_debug(s)) {
6549 		ret = validate_slab_cache(s);
6550 		if (ret >= 0)
6551 			ret = length;
6552 	}
6553 	return ret;
6554 }
6555 SLAB_ATTR(validate);
6556 
6557 #endif /* CONFIG_SLUB_DEBUG */
6558 
6559 #ifdef CONFIG_FAILSLAB
6560 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
6561 {
6562 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
6563 }
6564 
6565 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
6566 				size_t length)
6567 {
6568 	if (s->refcount > 1)
6569 		return -EINVAL;
6570 
6571 	if (buf[0] == '1')
6572 		WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
6573 	else
6574 		WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
6575 
6576 	return length;
6577 }
6578 SLAB_ATTR(failslab);
6579 #endif
6580 
6581 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
6582 {
6583 	return 0;
6584 }
6585 
6586 static ssize_t shrink_store(struct kmem_cache *s,
6587 			const char *buf, size_t length)
6588 {
6589 	if (buf[0] == '1')
6590 		kmem_cache_shrink(s);
6591 	else
6592 		return -EINVAL;
6593 	return length;
6594 }
6595 SLAB_ATTR(shrink);
6596 
6597 #ifdef CONFIG_NUMA
6598 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
6599 {
6600 	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
6601 }
6602 
6603 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
6604 				const char *buf, size_t length)
6605 {
6606 	unsigned int ratio;
6607 	int err;
6608 
6609 	err = kstrtouint(buf, 10, &ratio);
6610 	if (err)
6611 		return err;
6612 	if (ratio > 100)
6613 		return -ERANGE;
6614 
6615 	s->remote_node_defrag_ratio = ratio * 10;
6616 
6617 	return length;
6618 }
6619 SLAB_ATTR(remote_node_defrag_ratio);
6620 #endif
6621 
6622 #ifdef CONFIG_SLUB_STATS
6623 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
6624 {
6625 	unsigned long sum  = 0;
6626 	int cpu;
6627 	int len = 0;
6628 	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
6629 
6630 	if (!data)
6631 		return -ENOMEM;
6632 
6633 	for_each_online_cpu(cpu) {
6634 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
6635 
6636 		data[cpu] = x;
6637 		sum += x;
6638 	}
6639 
6640 	len += sysfs_emit_at(buf, len, "%lu", sum);
6641 
6642 #ifdef CONFIG_SMP
6643 	for_each_online_cpu(cpu) {
6644 		if (data[cpu])
6645 			len += sysfs_emit_at(buf, len, " C%d=%u",
6646 					     cpu, data[cpu]);
6647 	}
6648 #endif
6649 	kfree(data);
6650 	len += sysfs_emit_at(buf, len, "\n");
6651 
6652 	return len;
6653 }
6654 
6655 static void clear_stat(struct kmem_cache *s, enum stat_item si)
6656 {
6657 	int cpu;
6658 
6659 	for_each_online_cpu(cpu)
6660 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
6661 }
6662 
6663 #define STAT_ATTR(si, text) 					\
6664 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
6665 {								\
6666 	return show_stat(s, buf, si);				\
6667 }								\
6668 static ssize_t text##_store(struct kmem_cache *s,		\
6669 				const char *buf, size_t length)	\
6670 {								\
6671 	if (buf[0] != '0')					\
6672 		return -EINVAL;					\
6673 	clear_stat(s, si);					\
6674 	return length;						\
6675 }								\
6676 SLAB_ATTR(text);						\
6677 
6678 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
6679 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
6680 STAT_ATTR(FREE_FASTPATH, free_fastpath);
6681 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
6682 STAT_ATTR(FREE_FROZEN, free_frozen);
6683 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
6684 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
6685 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
6686 STAT_ATTR(ALLOC_SLAB, alloc_slab);
6687 STAT_ATTR(ALLOC_REFILL, alloc_refill);
6688 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
6689 STAT_ATTR(FREE_SLAB, free_slab);
6690 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
6691 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
6692 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
6693 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
6694 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
6695 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
6696 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
6697 STAT_ATTR(ORDER_FALLBACK, order_fallback);
6698 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
6699 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
6700 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
6701 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
6702 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
6703 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
6704 #endif	/* CONFIG_SLUB_STATS */
6705 
6706 #ifdef CONFIG_KFENCE
6707 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
6708 {
6709 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
6710 }
6711 
6712 static ssize_t skip_kfence_store(struct kmem_cache *s,
6713 			const char *buf, size_t length)
6714 {
6715 	int ret = length;
6716 
6717 	if (buf[0] == '0')
6718 		s->flags &= ~SLAB_SKIP_KFENCE;
6719 	else if (buf[0] == '1')
6720 		s->flags |= SLAB_SKIP_KFENCE;
6721 	else
6722 		ret = -EINVAL;
6723 
6724 	return ret;
6725 }
6726 SLAB_ATTR(skip_kfence);
6727 #endif
6728 
6729 static struct attribute *slab_attrs[] = {
6730 	&slab_size_attr.attr,
6731 	&object_size_attr.attr,
6732 	&objs_per_slab_attr.attr,
6733 	&order_attr.attr,
6734 	&min_partial_attr.attr,
6735 	&cpu_partial_attr.attr,
6736 	&objects_partial_attr.attr,
6737 	&partial_attr.attr,
6738 	&cpu_slabs_attr.attr,
6739 	&ctor_attr.attr,
6740 	&aliases_attr.attr,
6741 	&align_attr.attr,
6742 	&hwcache_align_attr.attr,
6743 	&reclaim_account_attr.attr,
6744 	&destroy_by_rcu_attr.attr,
6745 	&shrink_attr.attr,
6746 	&slabs_cpu_partial_attr.attr,
6747 #ifdef CONFIG_SLUB_DEBUG
6748 	&total_objects_attr.attr,
6749 	&objects_attr.attr,
6750 	&slabs_attr.attr,
6751 	&sanity_checks_attr.attr,
6752 	&trace_attr.attr,
6753 	&red_zone_attr.attr,
6754 	&poison_attr.attr,
6755 	&store_user_attr.attr,
6756 	&validate_attr.attr,
6757 #endif
6758 #ifdef CONFIG_ZONE_DMA
6759 	&cache_dma_attr.attr,
6760 #endif
6761 #ifdef CONFIG_NUMA
6762 	&remote_node_defrag_ratio_attr.attr,
6763 #endif
6764 #ifdef CONFIG_SLUB_STATS
6765 	&alloc_fastpath_attr.attr,
6766 	&alloc_slowpath_attr.attr,
6767 	&free_fastpath_attr.attr,
6768 	&free_slowpath_attr.attr,
6769 	&free_frozen_attr.attr,
6770 	&free_add_partial_attr.attr,
6771 	&free_remove_partial_attr.attr,
6772 	&alloc_from_partial_attr.attr,
6773 	&alloc_slab_attr.attr,
6774 	&alloc_refill_attr.attr,
6775 	&alloc_node_mismatch_attr.attr,
6776 	&free_slab_attr.attr,
6777 	&cpuslab_flush_attr.attr,
6778 	&deactivate_full_attr.attr,
6779 	&deactivate_empty_attr.attr,
6780 	&deactivate_to_head_attr.attr,
6781 	&deactivate_to_tail_attr.attr,
6782 	&deactivate_remote_frees_attr.attr,
6783 	&deactivate_bypass_attr.attr,
6784 	&order_fallback_attr.attr,
6785 	&cmpxchg_double_fail_attr.attr,
6786 	&cmpxchg_double_cpu_fail_attr.attr,
6787 	&cpu_partial_alloc_attr.attr,
6788 	&cpu_partial_free_attr.attr,
6789 	&cpu_partial_node_attr.attr,
6790 	&cpu_partial_drain_attr.attr,
6791 #endif
6792 #ifdef CONFIG_FAILSLAB
6793 	&failslab_attr.attr,
6794 #endif
6795 #ifdef CONFIG_HARDENED_USERCOPY
6796 	&usersize_attr.attr,
6797 #endif
6798 #ifdef CONFIG_KFENCE
6799 	&skip_kfence_attr.attr,
6800 #endif
6801 
6802 	NULL
6803 };
6804 
6805 static const struct attribute_group slab_attr_group = {
6806 	.attrs = slab_attrs,
6807 };
6808 
6809 static ssize_t slab_attr_show(struct kobject *kobj,
6810 				struct attribute *attr,
6811 				char *buf)
6812 {
6813 	struct slab_attribute *attribute;
6814 	struct kmem_cache *s;
6815 
6816 	attribute = to_slab_attr(attr);
6817 	s = to_slab(kobj);
6818 
6819 	if (!attribute->show)
6820 		return -EIO;
6821 
6822 	return attribute->show(s, buf);
6823 }
6824 
6825 static ssize_t slab_attr_store(struct kobject *kobj,
6826 				struct attribute *attr,
6827 				const char *buf, size_t len)
6828 {
6829 	struct slab_attribute *attribute;
6830 	struct kmem_cache *s;
6831 
6832 	attribute = to_slab_attr(attr);
6833 	s = to_slab(kobj);
6834 
6835 	if (!attribute->store)
6836 		return -EIO;
6837 
6838 	return attribute->store(s, buf, len);
6839 }
6840 
6841 static void kmem_cache_release(struct kobject *k)
6842 {
6843 	slab_kmem_cache_release(to_slab(k));
6844 }
6845 
6846 static const struct sysfs_ops slab_sysfs_ops = {
6847 	.show = slab_attr_show,
6848 	.store = slab_attr_store,
6849 };
6850 
6851 static const struct kobj_type slab_ktype = {
6852 	.sysfs_ops = &slab_sysfs_ops,
6853 	.release = kmem_cache_release,
6854 };
6855 
6856 static struct kset *slab_kset;
6857 
6858 static inline struct kset *cache_kset(struct kmem_cache *s)
6859 {
6860 	return slab_kset;
6861 }
6862 
6863 #define ID_STR_LENGTH 32
6864 
6865 /* Create a unique string id for a slab cache:
6866  *
6867  * Format	:[flags-]size
6868  */
6869 static char *create_unique_id(struct kmem_cache *s)
6870 {
6871 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
6872 	char *p = name;
6873 
6874 	if (!name)
6875 		return ERR_PTR(-ENOMEM);
6876 
6877 	*p++ = ':';
6878 	/*
6879 	 * First flags affecting slabcache operations. We will only
6880 	 * get here for aliasable slabs so we do not need to support
6881 	 * too many flags. The flags here must cover all flags that
6882 	 * are matched during merging to guarantee that the id is
6883 	 * unique.
6884 	 */
6885 	if (s->flags & SLAB_CACHE_DMA)
6886 		*p++ = 'd';
6887 	if (s->flags & SLAB_CACHE_DMA32)
6888 		*p++ = 'D';
6889 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
6890 		*p++ = 'a';
6891 	if (s->flags & SLAB_CONSISTENCY_CHECKS)
6892 		*p++ = 'F';
6893 	if (s->flags & SLAB_ACCOUNT)
6894 		*p++ = 'A';
6895 	if (p != name + 1)
6896 		*p++ = '-';
6897 	p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
6898 
6899 	if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
6900 		kfree(name);
6901 		return ERR_PTR(-EINVAL);
6902 	}
6903 	kmsan_unpoison_memory(name, p - name);
6904 	return name;
6905 }
6906 
6907 static int sysfs_slab_add(struct kmem_cache *s)
6908 {
6909 	int err;
6910 	const char *name;
6911 	struct kset *kset = cache_kset(s);
6912 	int unmergeable = slab_unmergeable(s);
6913 
6914 	if (!unmergeable && disable_higher_order_debug &&
6915 			(slub_debug & DEBUG_METADATA_FLAGS))
6916 		unmergeable = 1;
6917 
6918 	if (unmergeable) {
6919 		/*
6920 		 * Slabcache can never be merged so we can use the name proper.
6921 		 * This is typically the case for debug situations. In that
6922 		 * case we can catch duplicate names easily.
6923 		 */
6924 		sysfs_remove_link(&slab_kset->kobj, s->name);
6925 		name = s->name;
6926 	} else {
6927 		/*
6928 		 * Create a unique name for the slab as a target
6929 		 * for the symlinks.
6930 		 */
6931 		name = create_unique_id(s);
6932 		if (IS_ERR(name))
6933 			return PTR_ERR(name);
6934 	}
6935 
6936 	s->kobj.kset = kset;
6937 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
6938 	if (err)
6939 		goto out;
6940 
6941 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
6942 	if (err)
6943 		goto out_del_kobj;
6944 
6945 	if (!unmergeable) {
6946 		/* Setup first alias */
6947 		sysfs_slab_alias(s, s->name);
6948 	}
6949 out:
6950 	if (!unmergeable)
6951 		kfree(name);
6952 	return err;
6953 out_del_kobj:
6954 	kobject_del(&s->kobj);
6955 	goto out;
6956 }
6957 
6958 void sysfs_slab_unlink(struct kmem_cache *s)
6959 {
6960 	kobject_del(&s->kobj);
6961 }
6962 
6963 void sysfs_slab_release(struct kmem_cache *s)
6964 {
6965 	kobject_put(&s->kobj);
6966 }
6967 
6968 /*
6969  * Need to buffer aliases during bootup until sysfs becomes
6970  * available lest we lose that information.
6971  */
6972 struct saved_alias {
6973 	struct kmem_cache *s;
6974 	const char *name;
6975 	struct saved_alias *next;
6976 };
6977 
6978 static struct saved_alias *alias_list;
6979 
6980 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
6981 {
6982 	struct saved_alias *al;
6983 
6984 	if (slab_state == FULL) {
6985 		/*
6986 		 * If we have a leftover link then remove it.
6987 		 */
6988 		sysfs_remove_link(&slab_kset->kobj, name);
6989 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
6990 	}
6991 
6992 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
6993 	if (!al)
6994 		return -ENOMEM;
6995 
6996 	al->s = s;
6997 	al->name = name;
6998 	al->next = alias_list;
6999 	alias_list = al;
7000 	kmsan_unpoison_memory(al, sizeof(*al));
7001 	return 0;
7002 }
7003 
7004 static int __init slab_sysfs_init(void)
7005 {
7006 	struct kmem_cache *s;
7007 	int err;
7008 
7009 	mutex_lock(&slab_mutex);
7010 
7011 	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
7012 	if (!slab_kset) {
7013 		mutex_unlock(&slab_mutex);
7014 		pr_err("Cannot register slab subsystem.\n");
7015 		return -ENOMEM;
7016 	}
7017 
7018 	slab_state = FULL;
7019 
7020 	list_for_each_entry(s, &slab_caches, list) {
7021 		err = sysfs_slab_add(s);
7022 		if (err)
7023 			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
7024 			       s->name);
7025 	}
7026 
7027 	while (alias_list) {
7028 		struct saved_alias *al = alias_list;
7029 
7030 		alias_list = alias_list->next;
7031 		err = sysfs_slab_alias(al->s, al->name);
7032 		if (err)
7033 			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
7034 			       al->name);
7035 		kfree(al);
7036 	}
7037 
7038 	mutex_unlock(&slab_mutex);
7039 	return 0;
7040 }
7041 late_initcall(slab_sysfs_init);
7042 #endif /* SLAB_SUPPORTS_SYSFS */
7043 
7044 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
7045 static int slab_debugfs_show(struct seq_file *seq, void *v)
7046 {
7047 	struct loc_track *t = seq->private;
7048 	struct location *l;
7049 	unsigned long idx;
7050 
7051 	idx = (unsigned long) t->idx;
7052 	if (idx < t->count) {
7053 		l = &t->loc[idx];
7054 
7055 		seq_printf(seq, "%7ld ", l->count);
7056 
7057 		if (l->addr)
7058 			seq_printf(seq, "%pS", (void *)l->addr);
7059 		else
7060 			seq_puts(seq, "<not-available>");
7061 
7062 		if (l->waste)
7063 			seq_printf(seq, " waste=%lu/%lu",
7064 				l->count * l->waste, l->waste);
7065 
7066 		if (l->sum_time != l->min_time) {
7067 			seq_printf(seq, " age=%ld/%llu/%ld",
7068 				l->min_time, div_u64(l->sum_time, l->count),
7069 				l->max_time);
7070 		} else
7071 			seq_printf(seq, " age=%ld", l->min_time);
7072 
7073 		if (l->min_pid != l->max_pid)
7074 			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
7075 		else
7076 			seq_printf(seq, " pid=%ld",
7077 				l->min_pid);
7078 
7079 		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
7080 			seq_printf(seq, " cpus=%*pbl",
7081 				 cpumask_pr_args(to_cpumask(l->cpus)));
7082 
7083 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
7084 			seq_printf(seq, " nodes=%*pbl",
7085 				 nodemask_pr_args(&l->nodes));
7086 
7087 #ifdef CONFIG_STACKDEPOT
7088 		{
7089 			depot_stack_handle_t handle;
7090 			unsigned long *entries;
7091 			unsigned int nr_entries, j;
7092 
7093 			handle = READ_ONCE(l->handle);
7094 			if (handle) {
7095 				nr_entries = stack_depot_fetch(handle, &entries);
7096 				seq_puts(seq, "\n");
7097 				for (j = 0; j < nr_entries; j++)
7098 					seq_printf(seq, "        %pS\n", (void *)entries[j]);
7099 			}
7100 		}
7101 #endif
7102 		seq_puts(seq, "\n");
7103 	}
7104 
7105 	if (!idx && !t->count)
7106 		seq_puts(seq, "No data\n");
7107 
7108 	return 0;
7109 }
7110 
7111 static void slab_debugfs_stop(struct seq_file *seq, void *v)
7112 {
7113 }
7114 
7115 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
7116 {
7117 	struct loc_track *t = seq->private;
7118 
7119 	t->idx = ++(*ppos);
7120 	if (*ppos <= t->count)
7121 		return ppos;
7122 
7123 	return NULL;
7124 }
7125 
7126 static int cmp_loc_by_count(const void *a, const void *b, const void *data)
7127 {
7128 	struct location *loc1 = (struct location *)a;
7129 	struct location *loc2 = (struct location *)b;
7130 
7131 	if (loc1->count > loc2->count)
7132 		return -1;
7133 	else
7134 		return 1;
7135 }
7136 
7137 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
7138 {
7139 	struct loc_track *t = seq->private;
7140 
7141 	t->idx = *ppos;
7142 	return ppos;
7143 }
7144 
7145 static const struct seq_operations slab_debugfs_sops = {
7146 	.start  = slab_debugfs_start,
7147 	.next   = slab_debugfs_next,
7148 	.stop   = slab_debugfs_stop,
7149 	.show   = slab_debugfs_show,
7150 };
7151 
7152 static int slab_debug_trace_open(struct inode *inode, struct file *filep)
7153 {
7154 
7155 	struct kmem_cache_node *n;
7156 	enum track_item alloc;
7157 	int node;
7158 	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
7159 						sizeof(struct loc_track));
7160 	struct kmem_cache *s = file_inode(filep)->i_private;
7161 	unsigned long *obj_map;
7162 
7163 	if (!t)
7164 		return -ENOMEM;
7165 
7166 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
7167 	if (!obj_map) {
7168 		seq_release_private(inode, filep);
7169 		return -ENOMEM;
7170 	}
7171 
7172 	if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
7173 		alloc = TRACK_ALLOC;
7174 	else
7175 		alloc = TRACK_FREE;
7176 
7177 	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
7178 		bitmap_free(obj_map);
7179 		seq_release_private(inode, filep);
7180 		return -ENOMEM;
7181 	}
7182 
7183 	for_each_kmem_cache_node(s, node, n) {
7184 		unsigned long flags;
7185 		struct slab *slab;
7186 
7187 		if (!node_nr_slabs(n))
7188 			continue;
7189 
7190 		spin_lock_irqsave(&n->list_lock, flags);
7191 		list_for_each_entry(slab, &n->partial, slab_list)
7192 			process_slab(t, s, slab, alloc, obj_map);
7193 		list_for_each_entry(slab, &n->full, slab_list)
7194 			process_slab(t, s, slab, alloc, obj_map);
7195 		spin_unlock_irqrestore(&n->list_lock, flags);
7196 	}
7197 
7198 	/* Sort locations by count */
7199 	sort_r(t->loc, t->count, sizeof(struct location),
7200 		cmp_loc_by_count, NULL, NULL);
7201 
7202 	bitmap_free(obj_map);
7203 	return 0;
7204 }
7205 
7206 static int slab_debug_trace_release(struct inode *inode, struct file *file)
7207 {
7208 	struct seq_file *seq = file->private_data;
7209 	struct loc_track *t = seq->private;
7210 
7211 	free_loc_track(t);
7212 	return seq_release_private(inode, file);
7213 }
7214 
7215 static const struct file_operations slab_debugfs_fops = {
7216 	.open    = slab_debug_trace_open,
7217 	.read    = seq_read,
7218 	.llseek  = seq_lseek,
7219 	.release = slab_debug_trace_release,
7220 };
7221 
7222 static void debugfs_slab_add(struct kmem_cache *s)
7223 {
7224 	struct dentry *slab_cache_dir;
7225 
7226 	if (unlikely(!slab_debugfs_root))
7227 		return;
7228 
7229 	slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
7230 
7231 	debugfs_create_file("alloc_traces", 0400,
7232 		slab_cache_dir, s, &slab_debugfs_fops);
7233 
7234 	debugfs_create_file("free_traces", 0400,
7235 		slab_cache_dir, s, &slab_debugfs_fops);
7236 }
7237 
7238 void debugfs_slab_release(struct kmem_cache *s)
7239 {
7240 	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
7241 }
7242 
7243 static int __init slab_debugfs_init(void)
7244 {
7245 	struct kmem_cache *s;
7246 
7247 	slab_debugfs_root = debugfs_create_dir("slab", NULL);
7248 
7249 	list_for_each_entry(s, &slab_caches, list)
7250 		if (s->flags & SLAB_STORE_USER)
7251 			debugfs_slab_add(s);
7252 
7253 	return 0;
7254 
7255 }
7256 __initcall(slab_debugfs_init);
7257 #endif
7258 /*
7259  * The /proc/slabinfo ABI
7260  */
7261 #ifdef CONFIG_SLUB_DEBUG
7262 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
7263 {
7264 	unsigned long nr_slabs = 0;
7265 	unsigned long nr_objs = 0;
7266 	unsigned long nr_free = 0;
7267 	int node;
7268 	struct kmem_cache_node *n;
7269 
7270 	for_each_kmem_cache_node(s, node, n) {
7271 		nr_slabs += node_nr_slabs(n);
7272 		nr_objs += node_nr_objs(n);
7273 		nr_free += count_partial_free_approx(n);
7274 	}
7275 
7276 	sinfo->active_objs = nr_objs - nr_free;
7277 	sinfo->num_objs = nr_objs;
7278 	sinfo->active_slabs = nr_slabs;
7279 	sinfo->num_slabs = nr_slabs;
7280 	sinfo->objects_per_slab = oo_objects(s->oo);
7281 	sinfo->cache_order = oo_order(s->oo);
7282 }
7283 #endif /* CONFIG_SLUB_DEBUG */
7284