xref: /linux/mm/slub.c (revision 436381eaf2a423e60fc8340399f7d2458091b383)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SLUB: A slab allocator that limits cache line use instead of queuing
4  * objects in per cpu and per node lists.
5  *
6  * The allocator synchronizes using per slab locks or atomic operations
7  * and only uses a centralized lock to manage a pool of partial slabs.
8  *
9  * (C) 2007 SGI, Christoph Lameter
10  * (C) 2011 Linux Foundation, Christoph Lameter
11  */
12 
13 #include <linux/mm.h>
14 #include <linux/swap.h> /* mm_account_reclaimed_pages() */
15 #include <linux/module.h>
16 #include <linux/bit_spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/swab.h>
19 #include <linux/bitops.h>
20 #include <linux/slab.h>
21 #include "slab.h"
22 #include <linux/proc_fs.h>
23 #include <linux/seq_file.h>
24 #include <linux/kasan.h>
25 #include <linux/kmsan.h>
26 #include <linux/cpu.h>
27 #include <linux/cpuset.h>
28 #include <linux/mempolicy.h>
29 #include <linux/ctype.h>
30 #include <linux/stackdepot.h>
31 #include <linux/debugobjects.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kfence.h>
34 #include <linux/memory.h>
35 #include <linux/math64.h>
36 #include <linux/fault-inject.h>
37 #include <linux/kmemleak.h>
38 #include <linux/stacktrace.h>
39 #include <linux/prefetch.h>
40 #include <linux/memcontrol.h>
41 #include <linux/random.h>
42 #include <kunit/test.h>
43 #include <kunit/test-bug.h>
44 #include <linux/sort.h>
45 
46 #include <linux/debugfs.h>
47 #include <trace/events/kmem.h>
48 
49 #include "internal.h"
50 
51 /*
52  * Lock order:
53  *   1. slab_mutex (Global Mutex)
54  *   2. node->list_lock (Spinlock)
55  *   3. kmem_cache->cpu_slab->lock (Local lock)
56  *   4. slab_lock(slab) (Only on some arches)
57  *   5. object_map_lock (Only for debugging)
58  *
59  *   slab_mutex
60  *
61  *   The role of the slab_mutex is to protect the list of all the slabs
62  *   and to synchronize major metadata changes to slab cache structures.
63  *   Also synchronizes memory hotplug callbacks.
64  *
65  *   slab_lock
66  *
67  *   The slab_lock is a wrapper around the page lock, thus it is a bit
68  *   spinlock.
69  *
70  *   The slab_lock is only used on arches that do not have the ability
71  *   to do a cmpxchg_double. It only protects:
72  *
73  *	A. slab->freelist	-> List of free objects in a slab
74  *	B. slab->inuse		-> Number of objects in use
75  *	C. slab->objects	-> Number of objects in slab
76  *	D. slab->frozen		-> frozen state
77  *
78  *   Frozen slabs
79  *
80  *   If a slab is frozen then it is exempt from list management. It is
81  *   the cpu slab which is actively allocated from by the processor that
82  *   froze it and it is not on any list. The processor that froze the
83  *   slab is the one who can perform list operations on the slab. Other
84  *   processors may put objects onto the freelist but the processor that
85  *   froze the slab is the only one that can retrieve the objects from the
86  *   slab's freelist.
87  *
88  *   CPU partial slabs
89  *
90  *   The partially empty slabs cached on the CPU partial list are used
91  *   for performance reasons, which speeds up the allocation process.
92  *   These slabs are not frozen, but are also exempt from list management,
93  *   by clearing the PG_workingset flag when moving out of the node
94  *   partial list. Please see __slab_free() for more details.
95  *
96  *   To sum up, the current scheme is:
97  *   - node partial slab: PG_Workingset && !frozen
98  *   - cpu partial slab: !PG_Workingset && !frozen
99  *   - cpu slab: !PG_Workingset && frozen
100  *   - full slab: !PG_Workingset && !frozen
101  *
102  *   list_lock
103  *
104  *   The list_lock protects the partial and full list on each node and
105  *   the partial slab counter. If taken then no new slabs may be added or
106  *   removed from the lists nor make the number of partial slabs be modified.
107  *   (Note that the total number of slabs is an atomic value that may be
108  *   modified without taking the list lock).
109  *
110  *   The list_lock is a centralized lock and thus we avoid taking it as
111  *   much as possible. As long as SLUB does not have to handle partial
112  *   slabs, operations can continue without any centralized lock. F.e.
113  *   allocating a long series of objects that fill up slabs does not require
114  *   the list lock.
115  *
116  *   For debug caches, all allocations are forced to go through a list_lock
117  *   protected region to serialize against concurrent validation.
118  *
119  *   cpu_slab->lock local lock
120  *
121  *   This locks protect slowpath manipulation of all kmem_cache_cpu fields
122  *   except the stat counters. This is a percpu structure manipulated only by
123  *   the local cpu, so the lock protects against being preempted or interrupted
124  *   by an irq. Fast path operations rely on lockless operations instead.
125  *
126  *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption
127  *   which means the lockless fastpath cannot be used as it might interfere with
128  *   an in-progress slow path operations. In this case the local lock is always
129  *   taken but it still utilizes the freelist for the common operations.
130  *
131  *   lockless fastpaths
132  *
133  *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
134  *   are fully lockless when satisfied from the percpu slab (and when
135  *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
136  *   They also don't disable preemption or migration or irqs. They rely on
137  *   the transaction id (tid) field to detect being preempted or moved to
138  *   another cpu.
139  *
140  *   irq, preemption, migration considerations
141  *
142  *   Interrupts are disabled as part of list_lock or local_lock operations, or
143  *   around the slab_lock operation, in order to make the slab allocator safe
144  *   to use in the context of an irq.
145  *
146  *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
147  *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
148  *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
149  *   doesn't have to be revalidated in each section protected by the local lock.
150  *
151  * SLUB assigns one slab for allocation to each processor.
152  * Allocations only occur from these slabs called cpu slabs.
153  *
154  * Slabs with free elements are kept on a partial list and during regular
155  * operations no list for full slabs is used. If an object in a full slab is
156  * freed then the slab will show up again on the partial lists.
157  * We track full slabs for debugging purposes though because otherwise we
158  * cannot scan all objects.
159  *
160  * Slabs are freed when they become empty. Teardown and setup is
161  * minimal so we rely on the page allocators per cpu caches for
162  * fast frees and allocs.
163  *
164  * slab->frozen		The slab is frozen and exempt from list processing.
165  * 			This means that the slab is dedicated to a purpose
166  * 			such as satisfying allocations for a specific
167  * 			processor. Objects may be freed in the slab while
168  * 			it is frozen but slab_free will then skip the usual
169  * 			list operations. It is up to the processor holding
170  * 			the slab to integrate the slab into the slab lists
171  * 			when the slab is no longer needed.
172  *
173  * 			One use of this flag is to mark slabs that are
174  * 			used for allocations. Then such a slab becomes a cpu
175  * 			slab. The cpu slab may be equipped with an additional
176  * 			freelist that allows lockless access to
177  * 			free objects in addition to the regular freelist
178  * 			that requires the slab lock.
179  *
180  * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
181  * 			options set. This moves	slab handling out of
182  * 			the fast path and disables lockless freelists.
183  */
184 
185 /*
186  * We could simply use migrate_disable()/enable() but as long as it's a
187  * function call even on !PREEMPT_RT, use inline preempt_disable() there.
188  */
189 #ifndef CONFIG_PREEMPT_RT
190 #define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
191 #define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
192 #define USE_LOCKLESS_FAST_PATH()	(true)
193 #else
194 #define slub_get_cpu_ptr(var)		\
195 ({					\
196 	migrate_disable();		\
197 	this_cpu_ptr(var);		\
198 })
199 #define slub_put_cpu_ptr(var)		\
200 do {					\
201 	(void)(var);			\
202 	migrate_enable();		\
203 } while (0)
204 #define USE_LOCKLESS_FAST_PATH()	(false)
205 #endif
206 
207 #ifndef CONFIG_SLUB_TINY
208 #define __fastpath_inline __always_inline
209 #else
210 #define __fastpath_inline
211 #endif
212 
213 #ifdef CONFIG_SLUB_DEBUG
214 #ifdef CONFIG_SLUB_DEBUG_ON
215 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
216 #else
217 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
218 #endif
219 #endif		/* CONFIG_SLUB_DEBUG */
220 
221 /* Structure holding parameters for get_partial() call chain */
222 struct partial_context {
223 	gfp_t flags;
224 	unsigned int orig_size;
225 	void *object;
226 };
227 
228 static inline bool kmem_cache_debug(struct kmem_cache *s)
229 {
230 	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
231 }
232 
233 static inline bool slub_debug_orig_size(struct kmem_cache *s)
234 {
235 	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
236 			(s->flags & SLAB_KMALLOC));
237 }
238 
239 void *fixup_red_left(struct kmem_cache *s, void *p)
240 {
241 	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
242 		p += s->red_left_pad;
243 
244 	return p;
245 }
246 
247 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
248 {
249 #ifdef CONFIG_SLUB_CPU_PARTIAL
250 	return !kmem_cache_debug(s);
251 #else
252 	return false;
253 #endif
254 }
255 
256 /*
257  * Issues still to be resolved:
258  *
259  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
260  *
261  * - Variable sizing of the per node arrays
262  */
263 
264 /* Enable to log cmpxchg failures */
265 #undef SLUB_DEBUG_CMPXCHG
266 
267 #ifndef CONFIG_SLUB_TINY
268 /*
269  * Minimum number of partial slabs. These will be left on the partial
270  * lists even if they are empty. kmem_cache_shrink may reclaim them.
271  */
272 #define MIN_PARTIAL 5
273 
274 /*
275  * Maximum number of desirable partial slabs.
276  * The existence of more partial slabs makes kmem_cache_shrink
277  * sort the partial list by the number of objects in use.
278  */
279 #define MAX_PARTIAL 10
280 #else
281 #define MIN_PARTIAL 0
282 #define MAX_PARTIAL 0
283 #endif
284 
285 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
286 				SLAB_POISON | SLAB_STORE_USER)
287 
288 /*
289  * These debug flags cannot use CMPXCHG because there might be consistency
290  * issues when checking or reading debug information
291  */
292 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
293 				SLAB_TRACE)
294 
295 
296 /*
297  * Debugging flags that require metadata to be stored in the slab.  These get
298  * disabled when slab_debug=O is used and a cache's min order increases with
299  * metadata.
300  */
301 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
302 
303 #define OO_SHIFT	16
304 #define OO_MASK		((1 << OO_SHIFT) - 1)
305 #define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */
306 
307 /* Internal SLUB flags */
308 /* Poison object */
309 #define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
310 /* Use cmpxchg_double */
311 
312 #ifdef system_has_freelist_aba
313 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
314 #else
315 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
316 #endif
317 
318 /*
319  * Tracking user of a slab.
320  */
321 #define TRACK_ADDRS_COUNT 16
322 struct track {
323 	unsigned long addr;	/* Called from address */
324 #ifdef CONFIG_STACKDEPOT
325 	depot_stack_handle_t handle;
326 #endif
327 	int cpu;		/* Was running on cpu */
328 	int pid;		/* Pid context */
329 	unsigned long when;	/* When did the operation occur */
330 };
331 
332 enum track_item { TRACK_ALLOC, TRACK_FREE };
333 
334 #ifdef SLAB_SUPPORTS_SYSFS
335 static int sysfs_slab_add(struct kmem_cache *);
336 static int sysfs_slab_alias(struct kmem_cache *, const char *);
337 #else
338 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
339 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
340 							{ return 0; }
341 #endif
342 
343 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
344 static void debugfs_slab_add(struct kmem_cache *);
345 #else
346 static inline void debugfs_slab_add(struct kmem_cache *s) { }
347 #endif
348 
349 enum stat_item {
350 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
351 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
352 	FREE_FASTPATH,		/* Free to cpu slab */
353 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
354 	FREE_FROZEN,		/* Freeing to frozen slab */
355 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
356 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
357 	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
358 	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
359 	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
360 	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
361 	FREE_SLAB,		/* Slab freed to the page allocator */
362 	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
363 	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
364 	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
365 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
366 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
367 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
368 	DEACTIVATE_BYPASS,	/* Implicit deactivation */
369 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
370 	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
371 	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
372 	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
373 	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
374 	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
375 	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
376 	NR_SLUB_STAT_ITEMS
377 };
378 
379 #ifndef CONFIG_SLUB_TINY
380 /*
381  * When changing the layout, make sure freelist and tid are still compatible
382  * with this_cpu_cmpxchg_double() alignment requirements.
383  */
384 struct kmem_cache_cpu {
385 	union {
386 		struct {
387 			void **freelist;	/* Pointer to next available object */
388 			unsigned long tid;	/* Globally unique transaction id */
389 		};
390 		freelist_aba_t freelist_tid;
391 	};
392 	struct slab *slab;	/* The slab from which we are allocating */
393 #ifdef CONFIG_SLUB_CPU_PARTIAL
394 	struct slab *partial;	/* Partially allocated slabs */
395 #endif
396 	local_lock_t lock;	/* Protects the fields above */
397 #ifdef CONFIG_SLUB_STATS
398 	unsigned int stat[NR_SLUB_STAT_ITEMS];
399 #endif
400 };
401 #endif /* CONFIG_SLUB_TINY */
402 
403 static inline void stat(const struct kmem_cache *s, enum stat_item si)
404 {
405 #ifdef CONFIG_SLUB_STATS
406 	/*
407 	 * The rmw is racy on a preemptible kernel but this is acceptable, so
408 	 * avoid this_cpu_add()'s irq-disable overhead.
409 	 */
410 	raw_cpu_inc(s->cpu_slab->stat[si]);
411 #endif
412 }
413 
414 static inline
415 void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
416 {
417 #ifdef CONFIG_SLUB_STATS
418 	raw_cpu_add(s->cpu_slab->stat[si], v);
419 #endif
420 }
421 
422 /*
423  * The slab lists for all objects.
424  */
425 struct kmem_cache_node {
426 	spinlock_t list_lock;
427 	unsigned long nr_partial;
428 	struct list_head partial;
429 #ifdef CONFIG_SLUB_DEBUG
430 	atomic_long_t nr_slabs;
431 	atomic_long_t total_objects;
432 	struct list_head full;
433 #endif
434 };
435 
436 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
437 {
438 	return s->node[node];
439 }
440 
441 /*
442  * Iterator over all nodes. The body will be executed for each node that has
443  * a kmem_cache_node structure allocated (which is true for all online nodes)
444  */
445 #define for_each_kmem_cache_node(__s, __node, __n) \
446 	for (__node = 0; __node < nr_node_ids; __node++) \
447 		 if ((__n = get_node(__s, __node)))
448 
449 /*
450  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
451  * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
452  * differ during memory hotplug/hotremove operations.
453  * Protected by slab_mutex.
454  */
455 static nodemask_t slab_nodes;
456 
457 #ifndef CONFIG_SLUB_TINY
458 /*
459  * Workqueue used for flush_cpu_slab().
460  */
461 static struct workqueue_struct *flushwq;
462 #endif
463 
464 /********************************************************************
465  * 			Core slab cache functions
466  *******************************************************************/
467 
468 /*
469  * freeptr_t represents a SLUB freelist pointer, which might be encoded
470  * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
471  */
472 typedef struct { unsigned long v; } freeptr_t;
473 
474 /*
475  * Returns freelist pointer (ptr). With hardening, this is obfuscated
476  * with an XOR of the address where the pointer is held and a per-cache
477  * random number.
478  */
479 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
480 					    void *ptr, unsigned long ptr_addr)
481 {
482 	unsigned long encoded;
483 
484 #ifdef CONFIG_SLAB_FREELIST_HARDENED
485 	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
486 #else
487 	encoded = (unsigned long)ptr;
488 #endif
489 	return (freeptr_t){.v = encoded};
490 }
491 
492 static inline void *freelist_ptr_decode(const struct kmem_cache *s,
493 					freeptr_t ptr, unsigned long ptr_addr)
494 {
495 	void *decoded;
496 
497 #ifdef CONFIG_SLAB_FREELIST_HARDENED
498 	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
499 #else
500 	decoded = (void *)ptr.v;
501 #endif
502 	return decoded;
503 }
504 
505 static inline void *get_freepointer(struct kmem_cache *s, void *object)
506 {
507 	unsigned long ptr_addr;
508 	freeptr_t p;
509 
510 	object = kasan_reset_tag(object);
511 	ptr_addr = (unsigned long)object + s->offset;
512 	p = *(freeptr_t *)(ptr_addr);
513 	return freelist_ptr_decode(s, p, ptr_addr);
514 }
515 
516 #ifndef CONFIG_SLUB_TINY
517 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
518 {
519 	prefetchw(object + s->offset);
520 }
521 #endif
522 
523 /*
524  * When running under KMSAN, get_freepointer_safe() may return an uninitialized
525  * pointer value in the case the current thread loses the race for the next
526  * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
527  * slab_alloc_node() will fail, so the uninitialized value won't be used, but
528  * KMSAN will still check all arguments of cmpxchg because of imperfect
529  * handling of inline assembly.
530  * To work around this problem, we apply __no_kmsan_checks to ensure that
531  * get_freepointer_safe() returns initialized memory.
532  */
533 __no_kmsan_checks
534 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
535 {
536 	unsigned long freepointer_addr;
537 	freeptr_t p;
538 
539 	if (!debug_pagealloc_enabled_static())
540 		return get_freepointer(s, object);
541 
542 	object = kasan_reset_tag(object);
543 	freepointer_addr = (unsigned long)object + s->offset;
544 	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
545 	return freelist_ptr_decode(s, p, freepointer_addr);
546 }
547 
548 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
549 {
550 	unsigned long freeptr_addr = (unsigned long)object + s->offset;
551 
552 #ifdef CONFIG_SLAB_FREELIST_HARDENED
553 	BUG_ON(object == fp); /* naive detection of double free or corruption */
554 #endif
555 
556 	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
557 	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
558 }
559 
560 /*
561  * See comment in calculate_sizes().
562  */
563 static inline bool freeptr_outside_object(struct kmem_cache *s)
564 {
565 	return s->offset >= s->inuse;
566 }
567 
568 /*
569  * Return offset of the end of info block which is inuse + free pointer if
570  * not overlapping with object.
571  */
572 static inline unsigned int get_info_end(struct kmem_cache *s)
573 {
574 	if (freeptr_outside_object(s))
575 		return s->inuse + sizeof(void *);
576 	else
577 		return s->inuse;
578 }
579 
580 /* Loop over all objects in a slab */
581 #define for_each_object(__p, __s, __addr, __objects) \
582 	for (__p = fixup_red_left(__s, __addr); \
583 		__p < (__addr) + (__objects) * (__s)->size; \
584 		__p += (__s)->size)
585 
586 static inline unsigned int order_objects(unsigned int order, unsigned int size)
587 {
588 	return ((unsigned int)PAGE_SIZE << order) / size;
589 }
590 
591 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
592 		unsigned int size)
593 {
594 	struct kmem_cache_order_objects x = {
595 		(order << OO_SHIFT) + order_objects(order, size)
596 	};
597 
598 	return x;
599 }
600 
601 static inline unsigned int oo_order(struct kmem_cache_order_objects x)
602 {
603 	return x.x >> OO_SHIFT;
604 }
605 
606 static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
607 {
608 	return x.x & OO_MASK;
609 }
610 
611 #ifdef CONFIG_SLUB_CPU_PARTIAL
612 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
613 {
614 	unsigned int nr_slabs;
615 
616 	s->cpu_partial = nr_objects;
617 
618 	/*
619 	 * We take the number of objects but actually limit the number of
620 	 * slabs on the per cpu partial list, in order to limit excessive
621 	 * growth of the list. For simplicity we assume that the slabs will
622 	 * be half-full.
623 	 */
624 	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
625 	s->cpu_partial_slabs = nr_slabs;
626 }
627 
628 static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
629 {
630 	return s->cpu_partial_slabs;
631 }
632 #else
633 static inline void
634 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
635 {
636 }
637 
638 static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
639 {
640 	return 0;
641 }
642 #endif /* CONFIG_SLUB_CPU_PARTIAL */
643 
644 /*
645  * Per slab locking using the pagelock
646  */
647 static __always_inline void slab_lock(struct slab *slab)
648 {
649 	bit_spin_lock(PG_locked, &slab->__page_flags);
650 }
651 
652 static __always_inline void slab_unlock(struct slab *slab)
653 {
654 	bit_spin_unlock(PG_locked, &slab->__page_flags);
655 }
656 
657 static inline bool
658 __update_freelist_fast(struct slab *slab,
659 		      void *freelist_old, unsigned long counters_old,
660 		      void *freelist_new, unsigned long counters_new)
661 {
662 #ifdef system_has_freelist_aba
663 	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
664 	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
665 
666 	return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
667 #else
668 	return false;
669 #endif
670 }
671 
672 static inline bool
673 __update_freelist_slow(struct slab *slab,
674 		      void *freelist_old, unsigned long counters_old,
675 		      void *freelist_new, unsigned long counters_new)
676 {
677 	bool ret = false;
678 
679 	slab_lock(slab);
680 	if (slab->freelist == freelist_old &&
681 	    slab->counters == counters_old) {
682 		slab->freelist = freelist_new;
683 		slab->counters = counters_new;
684 		ret = true;
685 	}
686 	slab_unlock(slab);
687 
688 	return ret;
689 }
690 
691 /*
692  * Interrupts must be disabled (for the fallback code to work right), typically
693  * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
694  * part of bit_spin_lock(), is sufficient because the policy is not to allow any
695  * allocation/ free operation in hardirq context. Therefore nothing can
696  * interrupt the operation.
697  */
698 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
699 		void *freelist_old, unsigned long counters_old,
700 		void *freelist_new, unsigned long counters_new,
701 		const char *n)
702 {
703 	bool ret;
704 
705 	if (USE_LOCKLESS_FAST_PATH())
706 		lockdep_assert_irqs_disabled();
707 
708 	if (s->flags & __CMPXCHG_DOUBLE) {
709 		ret = __update_freelist_fast(slab, freelist_old, counters_old,
710 				            freelist_new, counters_new);
711 	} else {
712 		ret = __update_freelist_slow(slab, freelist_old, counters_old,
713 				            freelist_new, counters_new);
714 	}
715 	if (likely(ret))
716 		return true;
717 
718 	cpu_relax();
719 	stat(s, CMPXCHG_DOUBLE_FAIL);
720 
721 #ifdef SLUB_DEBUG_CMPXCHG
722 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
723 #endif
724 
725 	return false;
726 }
727 
728 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
729 		void *freelist_old, unsigned long counters_old,
730 		void *freelist_new, unsigned long counters_new,
731 		const char *n)
732 {
733 	bool ret;
734 
735 	if (s->flags & __CMPXCHG_DOUBLE) {
736 		ret = __update_freelist_fast(slab, freelist_old, counters_old,
737 				            freelist_new, counters_new);
738 	} else {
739 		unsigned long flags;
740 
741 		local_irq_save(flags);
742 		ret = __update_freelist_slow(slab, freelist_old, counters_old,
743 				            freelist_new, counters_new);
744 		local_irq_restore(flags);
745 	}
746 	if (likely(ret))
747 		return true;
748 
749 	cpu_relax();
750 	stat(s, CMPXCHG_DOUBLE_FAIL);
751 
752 #ifdef SLUB_DEBUG_CMPXCHG
753 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
754 #endif
755 
756 	return false;
757 }
758 
759 #ifdef CONFIG_SLUB_DEBUG
760 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
761 static DEFINE_SPINLOCK(object_map_lock);
762 
763 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
764 		       struct slab *slab)
765 {
766 	void *addr = slab_address(slab);
767 	void *p;
768 
769 	bitmap_zero(obj_map, slab->objects);
770 
771 	for (p = slab->freelist; p; p = get_freepointer(s, p))
772 		set_bit(__obj_to_index(s, addr, p), obj_map);
773 }
774 
775 #if IS_ENABLED(CONFIG_KUNIT)
776 static bool slab_add_kunit_errors(void)
777 {
778 	struct kunit_resource *resource;
779 
780 	if (!kunit_get_current_test())
781 		return false;
782 
783 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
784 	if (!resource)
785 		return false;
786 
787 	(*(int *)resource->data)++;
788 	kunit_put_resource(resource);
789 	return true;
790 }
791 
792 static bool slab_in_kunit_test(void)
793 {
794 	struct kunit_resource *resource;
795 
796 	if (!kunit_get_current_test())
797 		return false;
798 
799 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
800 	if (!resource)
801 		return false;
802 
803 	kunit_put_resource(resource);
804 	return true;
805 }
806 #else
807 static inline bool slab_add_kunit_errors(void) { return false; }
808 static inline bool slab_in_kunit_test(void) { return false; }
809 #endif
810 
811 static inline unsigned int size_from_object(struct kmem_cache *s)
812 {
813 	if (s->flags & SLAB_RED_ZONE)
814 		return s->size - s->red_left_pad;
815 
816 	return s->size;
817 }
818 
819 static inline void *restore_red_left(struct kmem_cache *s, void *p)
820 {
821 	if (s->flags & SLAB_RED_ZONE)
822 		p -= s->red_left_pad;
823 
824 	return p;
825 }
826 
827 /*
828  * Debug settings:
829  */
830 #if defined(CONFIG_SLUB_DEBUG_ON)
831 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
832 #else
833 static slab_flags_t slub_debug;
834 #endif
835 
836 static char *slub_debug_string;
837 static int disable_higher_order_debug;
838 
839 /*
840  * slub is about to manipulate internal object metadata.  This memory lies
841  * outside the range of the allocated object, so accessing it would normally
842  * be reported by kasan as a bounds error.  metadata_access_enable() is used
843  * to tell kasan that these accesses are OK.
844  */
845 static inline void metadata_access_enable(void)
846 {
847 	kasan_disable_current();
848 }
849 
850 static inline void metadata_access_disable(void)
851 {
852 	kasan_enable_current();
853 }
854 
855 /*
856  * Object debugging
857  */
858 
859 /* Verify that a pointer has an address that is valid within a slab page */
860 static inline int check_valid_pointer(struct kmem_cache *s,
861 				struct slab *slab, void *object)
862 {
863 	void *base;
864 
865 	if (!object)
866 		return 1;
867 
868 	base = slab_address(slab);
869 	object = kasan_reset_tag(object);
870 	object = restore_red_left(s, object);
871 	if (object < base || object >= base + slab->objects * s->size ||
872 		(object - base) % s->size) {
873 		return 0;
874 	}
875 
876 	return 1;
877 }
878 
879 static void print_section(char *level, char *text, u8 *addr,
880 			  unsigned int length)
881 {
882 	metadata_access_enable();
883 	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
884 			16, 1, kasan_reset_tag((void *)addr), length, 1);
885 	metadata_access_disable();
886 }
887 
888 static struct track *get_track(struct kmem_cache *s, void *object,
889 	enum track_item alloc)
890 {
891 	struct track *p;
892 
893 	p = object + get_info_end(s);
894 
895 	return kasan_reset_tag(p + alloc);
896 }
897 
898 #ifdef CONFIG_STACKDEPOT
899 static noinline depot_stack_handle_t set_track_prepare(void)
900 {
901 	depot_stack_handle_t handle;
902 	unsigned long entries[TRACK_ADDRS_COUNT];
903 	unsigned int nr_entries;
904 
905 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
906 	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
907 
908 	return handle;
909 }
910 #else
911 static inline depot_stack_handle_t set_track_prepare(void)
912 {
913 	return 0;
914 }
915 #endif
916 
917 static void set_track_update(struct kmem_cache *s, void *object,
918 			     enum track_item alloc, unsigned long addr,
919 			     depot_stack_handle_t handle)
920 {
921 	struct track *p = get_track(s, object, alloc);
922 
923 #ifdef CONFIG_STACKDEPOT
924 	p->handle = handle;
925 #endif
926 	p->addr = addr;
927 	p->cpu = smp_processor_id();
928 	p->pid = current->pid;
929 	p->when = jiffies;
930 }
931 
932 static __always_inline void set_track(struct kmem_cache *s, void *object,
933 				      enum track_item alloc, unsigned long addr)
934 {
935 	depot_stack_handle_t handle = set_track_prepare();
936 
937 	set_track_update(s, object, alloc, addr, handle);
938 }
939 
940 static void init_tracking(struct kmem_cache *s, void *object)
941 {
942 	struct track *p;
943 
944 	if (!(s->flags & SLAB_STORE_USER))
945 		return;
946 
947 	p = get_track(s, object, TRACK_ALLOC);
948 	memset(p, 0, 2*sizeof(struct track));
949 }
950 
951 static void print_track(const char *s, struct track *t, unsigned long pr_time)
952 {
953 	depot_stack_handle_t handle __maybe_unused;
954 
955 	if (!t->addr)
956 		return;
957 
958 	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
959 	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
960 #ifdef CONFIG_STACKDEPOT
961 	handle = READ_ONCE(t->handle);
962 	if (handle)
963 		stack_depot_print(handle);
964 	else
965 		pr_err("object allocation/free stack trace missing\n");
966 #endif
967 }
968 
969 void print_tracking(struct kmem_cache *s, void *object)
970 {
971 	unsigned long pr_time = jiffies;
972 	if (!(s->flags & SLAB_STORE_USER))
973 		return;
974 
975 	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
976 	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
977 }
978 
979 static void print_slab_info(const struct slab *slab)
980 {
981 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
982 	       slab, slab->objects, slab->inuse, slab->freelist,
983 	       &slab->__page_flags);
984 }
985 
986 /*
987  * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
988  * family will round up the real request size to these fixed ones, so
989  * there could be an extra area than what is requested. Save the original
990  * request size in the meta data area, for better debug and sanity check.
991  */
992 static inline void set_orig_size(struct kmem_cache *s,
993 				void *object, unsigned int orig_size)
994 {
995 	void *p = kasan_reset_tag(object);
996 	unsigned int kasan_meta_size;
997 
998 	if (!slub_debug_orig_size(s))
999 		return;
1000 
1001 	/*
1002 	 * KASAN can save its free meta data inside of the object at offset 0.
1003 	 * If this meta data size is larger than 'orig_size', it will overlap
1004 	 * the data redzone in [orig_size+1, object_size]. Thus, we adjust
1005 	 * 'orig_size' to be as at least as big as KASAN's meta data.
1006 	 */
1007 	kasan_meta_size = kasan_metadata_size(s, true);
1008 	if (kasan_meta_size > orig_size)
1009 		orig_size = kasan_meta_size;
1010 
1011 	p += get_info_end(s);
1012 	p += sizeof(struct track) * 2;
1013 
1014 	*(unsigned int *)p = orig_size;
1015 }
1016 
1017 static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
1018 {
1019 	void *p = kasan_reset_tag(object);
1020 
1021 	if (!slub_debug_orig_size(s))
1022 		return s->object_size;
1023 
1024 	p += get_info_end(s);
1025 	p += sizeof(struct track) * 2;
1026 
1027 	return *(unsigned int *)p;
1028 }
1029 
1030 void skip_orig_size_check(struct kmem_cache *s, const void *object)
1031 {
1032 	set_orig_size(s, (void *)object, s->object_size);
1033 }
1034 
1035 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
1036 {
1037 	struct va_format vaf;
1038 	va_list args;
1039 
1040 	va_start(args, fmt);
1041 	vaf.fmt = fmt;
1042 	vaf.va = &args;
1043 	pr_err("=============================================================================\n");
1044 	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
1045 	pr_err("-----------------------------------------------------------------------------\n\n");
1046 	va_end(args);
1047 }
1048 
1049 __printf(2, 3)
1050 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
1051 {
1052 	struct va_format vaf;
1053 	va_list args;
1054 
1055 	if (slab_add_kunit_errors())
1056 		return;
1057 
1058 	va_start(args, fmt);
1059 	vaf.fmt = fmt;
1060 	vaf.va = &args;
1061 	pr_err("FIX %s: %pV\n", s->name, &vaf);
1062 	va_end(args);
1063 }
1064 
1065 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
1066 {
1067 	unsigned int off;	/* Offset of last byte */
1068 	u8 *addr = slab_address(slab);
1069 
1070 	print_tracking(s, p);
1071 
1072 	print_slab_info(slab);
1073 
1074 	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
1075 	       p, p - addr, get_freepointer(s, p));
1076 
1077 	if (s->flags & SLAB_RED_ZONE)
1078 		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
1079 			      s->red_left_pad);
1080 	else if (p > addr + 16)
1081 		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
1082 
1083 	print_section(KERN_ERR,         "Object   ", p,
1084 		      min_t(unsigned int, s->object_size, PAGE_SIZE));
1085 	if (s->flags & SLAB_RED_ZONE)
1086 		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
1087 			s->inuse - s->object_size);
1088 
1089 	off = get_info_end(s);
1090 
1091 	if (s->flags & SLAB_STORE_USER)
1092 		off += 2 * sizeof(struct track);
1093 
1094 	if (slub_debug_orig_size(s))
1095 		off += sizeof(unsigned int);
1096 
1097 	off += kasan_metadata_size(s, false);
1098 
1099 	if (off != size_from_object(s))
1100 		/* Beginning of the filler is the free pointer */
1101 		print_section(KERN_ERR, "Padding  ", p + off,
1102 			      size_from_object(s) - off);
1103 
1104 	dump_stack();
1105 }
1106 
1107 static void object_err(struct kmem_cache *s, struct slab *slab,
1108 			u8 *object, char *reason)
1109 {
1110 	if (slab_add_kunit_errors())
1111 		return;
1112 
1113 	slab_bug(s, "%s", reason);
1114 	print_trailer(s, slab, object);
1115 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1116 }
1117 
1118 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1119 			       void **freelist, void *nextfree)
1120 {
1121 	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
1122 	    !check_valid_pointer(s, slab, nextfree) && freelist) {
1123 		object_err(s, slab, *freelist, "Freechain corrupt");
1124 		*freelist = NULL;
1125 		slab_fix(s, "Isolate corrupted freechain");
1126 		return true;
1127 	}
1128 
1129 	return false;
1130 }
1131 
1132 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
1133 			const char *fmt, ...)
1134 {
1135 	va_list args;
1136 	char buf[100];
1137 
1138 	if (slab_add_kunit_errors())
1139 		return;
1140 
1141 	va_start(args, fmt);
1142 	vsnprintf(buf, sizeof(buf), fmt, args);
1143 	va_end(args);
1144 	slab_bug(s, "%s", buf);
1145 	print_slab_info(slab);
1146 	dump_stack();
1147 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1148 }
1149 
1150 static void init_object(struct kmem_cache *s, void *object, u8 val)
1151 {
1152 	u8 *p = kasan_reset_tag(object);
1153 	unsigned int poison_size = s->object_size;
1154 
1155 	if (s->flags & SLAB_RED_ZONE) {
1156 		memset(p - s->red_left_pad, val, s->red_left_pad);
1157 
1158 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1159 			/*
1160 			 * Redzone the extra allocated space by kmalloc than
1161 			 * requested, and the poison size will be limited to
1162 			 * the original request size accordingly.
1163 			 */
1164 			poison_size = get_orig_size(s, object);
1165 		}
1166 	}
1167 
1168 	if (s->flags & __OBJECT_POISON) {
1169 		memset(p, POISON_FREE, poison_size - 1);
1170 		p[poison_size - 1] = POISON_END;
1171 	}
1172 
1173 	if (s->flags & SLAB_RED_ZONE)
1174 		memset(p + poison_size, val, s->inuse - poison_size);
1175 }
1176 
1177 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
1178 						void *from, void *to)
1179 {
1180 	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1181 	memset(from, data, to - from);
1182 }
1183 
1184 static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
1185 			u8 *object, char *what,
1186 			u8 *start, unsigned int value, unsigned int bytes)
1187 {
1188 	u8 *fault;
1189 	u8 *end;
1190 	u8 *addr = slab_address(slab);
1191 
1192 	metadata_access_enable();
1193 	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
1194 	metadata_access_disable();
1195 	if (!fault)
1196 		return 1;
1197 
1198 	end = start + bytes;
1199 	while (end > fault && end[-1] == value)
1200 		end--;
1201 
1202 	if (slab_add_kunit_errors())
1203 		goto skip_bug_print;
1204 
1205 	slab_bug(s, "%s overwritten", what);
1206 	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1207 					fault, end - 1, fault - addr,
1208 					fault[0], value);
1209 
1210 skip_bug_print:
1211 	restore_bytes(s, what, value, fault, end);
1212 	return 0;
1213 }
1214 
1215 /*
1216  * Object layout:
1217  *
1218  * object address
1219  * 	Bytes of the object to be managed.
1220  * 	If the freepointer may overlay the object then the free
1221  *	pointer is at the middle of the object.
1222  *
1223  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
1224  * 	0xa5 (POISON_END)
1225  *
1226  * object + s->object_size
1227  * 	Padding to reach word boundary. This is also used for Redzoning.
1228  * 	Padding is extended by another word if Redzoning is enabled and
1229  * 	object_size == inuse.
1230  *
1231  * 	We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
1232  * 	0xcc (SLUB_RED_ACTIVE) for objects in use.
1233  *
1234  * object + s->inuse
1235  * 	Meta data starts here.
1236  *
1237  * 	A. Free pointer (if we cannot overwrite object on free)
1238  * 	B. Tracking data for SLAB_STORE_USER
1239  *	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
1240  *	D. Padding to reach required alignment boundary or at minimum
1241  * 		one word if debugging is on to be able to detect writes
1242  * 		before the word boundary.
1243  *
1244  *	Padding is done using 0x5a (POISON_INUSE)
1245  *
1246  * object + s->size
1247  * 	Nothing is used beyond s->size.
1248  *
1249  * If slabcaches are merged then the object_size and inuse boundaries are mostly
1250  * ignored. And therefore no slab options that rely on these boundaries
1251  * may be used with merged slabcaches.
1252  */
1253 
1254 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
1255 {
1256 	unsigned long off = get_info_end(s);	/* The end of info */
1257 
1258 	if (s->flags & SLAB_STORE_USER) {
1259 		/* We also have user information there */
1260 		off += 2 * sizeof(struct track);
1261 
1262 		if (s->flags & SLAB_KMALLOC)
1263 			off += sizeof(unsigned int);
1264 	}
1265 
1266 	off += kasan_metadata_size(s, false);
1267 
1268 	if (size_from_object(s) == off)
1269 		return 1;
1270 
1271 	return check_bytes_and_report(s, slab, p, "Object padding",
1272 			p + off, POISON_INUSE, size_from_object(s) - off);
1273 }
1274 
1275 /* Check the pad bytes at the end of a slab page */
1276 static void slab_pad_check(struct kmem_cache *s, struct slab *slab)
1277 {
1278 	u8 *start;
1279 	u8 *fault;
1280 	u8 *end;
1281 	u8 *pad;
1282 	int length;
1283 	int remainder;
1284 
1285 	if (!(s->flags & SLAB_POISON))
1286 		return;
1287 
1288 	start = slab_address(slab);
1289 	length = slab_size(slab);
1290 	end = start + length;
1291 	remainder = length % s->size;
1292 	if (!remainder)
1293 		return;
1294 
1295 	pad = end - remainder;
1296 	metadata_access_enable();
1297 	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1298 	metadata_access_disable();
1299 	if (!fault)
1300 		return;
1301 	while (end > fault && end[-1] == POISON_INUSE)
1302 		end--;
1303 
1304 	slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1305 			fault, end - 1, fault - start);
1306 	print_section(KERN_ERR, "Padding ", pad, remainder);
1307 
1308 	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
1309 }
1310 
1311 static int check_object(struct kmem_cache *s, struct slab *slab,
1312 					void *object, u8 val)
1313 {
1314 	u8 *p = object;
1315 	u8 *endobject = object + s->object_size;
1316 	unsigned int orig_size, kasan_meta_size;
1317 	int ret = 1;
1318 
1319 	if (s->flags & SLAB_RED_ZONE) {
1320 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1321 			object - s->red_left_pad, val, s->red_left_pad))
1322 			ret = 0;
1323 
1324 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1325 			endobject, val, s->inuse - s->object_size))
1326 			ret = 0;
1327 
1328 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1329 			orig_size = get_orig_size(s, object);
1330 
1331 			if (s->object_size > orig_size  &&
1332 				!check_bytes_and_report(s, slab, object,
1333 					"kmalloc Redzone", p + orig_size,
1334 					val, s->object_size - orig_size)) {
1335 				ret = 0;
1336 			}
1337 		}
1338 	} else {
1339 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1340 			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
1341 				endobject, POISON_INUSE,
1342 				s->inuse - s->object_size))
1343 				ret = 0;
1344 		}
1345 	}
1346 
1347 	if (s->flags & SLAB_POISON) {
1348 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1349 			/*
1350 			 * KASAN can save its free meta data inside of the
1351 			 * object at offset 0. Thus, skip checking the part of
1352 			 * the redzone that overlaps with the meta data.
1353 			 */
1354 			kasan_meta_size = kasan_metadata_size(s, true);
1355 			if (kasan_meta_size < s->object_size - 1 &&
1356 			    !check_bytes_and_report(s, slab, p, "Poison",
1357 					p + kasan_meta_size, POISON_FREE,
1358 					s->object_size - kasan_meta_size - 1))
1359 				ret = 0;
1360 			if (kasan_meta_size < s->object_size &&
1361 			    !check_bytes_and_report(s, slab, p, "End Poison",
1362 					p + s->object_size - 1, POISON_END, 1))
1363 				ret = 0;
1364 		}
1365 		/*
1366 		 * check_pad_bytes cleans up on its own.
1367 		 */
1368 		if (!check_pad_bytes(s, slab, p))
1369 			ret = 0;
1370 	}
1371 
1372 	/*
1373 	 * Cannot check freepointer while object is allocated if
1374 	 * object and freepointer overlap.
1375 	 */
1376 	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
1377 	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
1378 		object_err(s, slab, p, "Freepointer corrupt");
1379 		/*
1380 		 * No choice but to zap it and thus lose the remainder
1381 		 * of the free objects in this slab. May cause
1382 		 * another error because the object count is now wrong.
1383 		 */
1384 		set_freepointer(s, p, NULL);
1385 		ret = 0;
1386 	}
1387 
1388 	if (!ret && !slab_in_kunit_test()) {
1389 		print_trailer(s, slab, object);
1390 		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1391 	}
1392 
1393 	return ret;
1394 }
1395 
1396 static int check_slab(struct kmem_cache *s, struct slab *slab)
1397 {
1398 	int maxobj;
1399 
1400 	if (!folio_test_slab(slab_folio(slab))) {
1401 		slab_err(s, slab, "Not a valid slab page");
1402 		return 0;
1403 	}
1404 
1405 	maxobj = order_objects(slab_order(slab), s->size);
1406 	if (slab->objects > maxobj) {
1407 		slab_err(s, slab, "objects %u > max %u",
1408 			slab->objects, maxobj);
1409 		return 0;
1410 	}
1411 	if (slab->inuse > slab->objects) {
1412 		slab_err(s, slab, "inuse %u > max %u",
1413 			slab->inuse, slab->objects);
1414 		return 0;
1415 	}
1416 	/* Slab_pad_check fixes things up after itself */
1417 	slab_pad_check(s, slab);
1418 	return 1;
1419 }
1420 
1421 /*
1422  * Determine if a certain object in a slab is on the freelist. Must hold the
1423  * slab lock to guarantee that the chains are in a consistent state.
1424  */
1425 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
1426 {
1427 	int nr = 0;
1428 	void *fp;
1429 	void *object = NULL;
1430 	int max_objects;
1431 
1432 	fp = slab->freelist;
1433 	while (fp && nr <= slab->objects) {
1434 		if (fp == search)
1435 			return 1;
1436 		if (!check_valid_pointer(s, slab, fp)) {
1437 			if (object) {
1438 				object_err(s, slab, object,
1439 					"Freechain corrupt");
1440 				set_freepointer(s, object, NULL);
1441 			} else {
1442 				slab_err(s, slab, "Freepointer corrupt");
1443 				slab->freelist = NULL;
1444 				slab->inuse = slab->objects;
1445 				slab_fix(s, "Freelist cleared");
1446 				return 0;
1447 			}
1448 			break;
1449 		}
1450 		object = fp;
1451 		fp = get_freepointer(s, object);
1452 		nr++;
1453 	}
1454 
1455 	max_objects = order_objects(slab_order(slab), s->size);
1456 	if (max_objects > MAX_OBJS_PER_PAGE)
1457 		max_objects = MAX_OBJS_PER_PAGE;
1458 
1459 	if (slab->objects != max_objects) {
1460 		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1461 			 slab->objects, max_objects);
1462 		slab->objects = max_objects;
1463 		slab_fix(s, "Number of objects adjusted");
1464 	}
1465 	if (slab->inuse != slab->objects - nr) {
1466 		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
1467 			 slab->inuse, slab->objects - nr);
1468 		slab->inuse = slab->objects - nr;
1469 		slab_fix(s, "Object count adjusted");
1470 	}
1471 	return search == NULL;
1472 }
1473 
1474 static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1475 								int alloc)
1476 {
1477 	if (s->flags & SLAB_TRACE) {
1478 		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1479 			s->name,
1480 			alloc ? "alloc" : "free",
1481 			object, slab->inuse,
1482 			slab->freelist);
1483 
1484 		if (!alloc)
1485 			print_section(KERN_INFO, "Object ", (void *)object,
1486 					s->object_size);
1487 
1488 		dump_stack();
1489 	}
1490 }
1491 
1492 /*
1493  * Tracking of fully allocated slabs for debugging purposes.
1494  */
1495 static void add_full(struct kmem_cache *s,
1496 	struct kmem_cache_node *n, struct slab *slab)
1497 {
1498 	if (!(s->flags & SLAB_STORE_USER))
1499 		return;
1500 
1501 	lockdep_assert_held(&n->list_lock);
1502 	list_add(&slab->slab_list, &n->full);
1503 }
1504 
1505 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
1506 {
1507 	if (!(s->flags & SLAB_STORE_USER))
1508 		return;
1509 
1510 	lockdep_assert_held(&n->list_lock);
1511 	list_del(&slab->slab_list);
1512 }
1513 
1514 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1515 {
1516 	return atomic_long_read(&n->nr_slabs);
1517 }
1518 
1519 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1520 {
1521 	struct kmem_cache_node *n = get_node(s, node);
1522 
1523 	atomic_long_inc(&n->nr_slabs);
1524 	atomic_long_add(objects, &n->total_objects);
1525 }
1526 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1527 {
1528 	struct kmem_cache_node *n = get_node(s, node);
1529 
1530 	atomic_long_dec(&n->nr_slabs);
1531 	atomic_long_sub(objects, &n->total_objects);
1532 }
1533 
1534 /* Object debug checks for alloc/free paths */
1535 static void setup_object_debug(struct kmem_cache *s, void *object)
1536 {
1537 	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1538 		return;
1539 
1540 	init_object(s, object, SLUB_RED_INACTIVE);
1541 	init_tracking(s, object);
1542 }
1543 
1544 static
1545 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
1546 {
1547 	if (!kmem_cache_debug_flags(s, SLAB_POISON))
1548 		return;
1549 
1550 	metadata_access_enable();
1551 	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
1552 	metadata_access_disable();
1553 }
1554 
1555 static inline int alloc_consistency_checks(struct kmem_cache *s,
1556 					struct slab *slab, void *object)
1557 {
1558 	if (!check_slab(s, slab))
1559 		return 0;
1560 
1561 	if (!check_valid_pointer(s, slab, object)) {
1562 		object_err(s, slab, object, "Freelist Pointer check fails");
1563 		return 0;
1564 	}
1565 
1566 	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1567 		return 0;
1568 
1569 	return 1;
1570 }
1571 
1572 static noinline bool alloc_debug_processing(struct kmem_cache *s,
1573 			struct slab *slab, void *object, int orig_size)
1574 {
1575 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1576 		if (!alloc_consistency_checks(s, slab, object))
1577 			goto bad;
1578 	}
1579 
1580 	/* Success. Perform special debug activities for allocs */
1581 	trace(s, slab, object, 1);
1582 	set_orig_size(s, object, orig_size);
1583 	init_object(s, object, SLUB_RED_ACTIVE);
1584 	return true;
1585 
1586 bad:
1587 	if (folio_test_slab(slab_folio(slab))) {
1588 		/*
1589 		 * If this is a slab page then lets do the best we can
1590 		 * to avoid issues in the future. Marking all objects
1591 		 * as used avoids touching the remaining objects.
1592 		 */
1593 		slab_fix(s, "Marking all objects used");
1594 		slab->inuse = slab->objects;
1595 		slab->freelist = NULL;
1596 	}
1597 	return false;
1598 }
1599 
1600 static inline int free_consistency_checks(struct kmem_cache *s,
1601 		struct slab *slab, void *object, unsigned long addr)
1602 {
1603 	if (!check_valid_pointer(s, slab, object)) {
1604 		slab_err(s, slab, "Invalid object pointer 0x%p", object);
1605 		return 0;
1606 	}
1607 
1608 	if (on_freelist(s, slab, object)) {
1609 		object_err(s, slab, object, "Object already free");
1610 		return 0;
1611 	}
1612 
1613 	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1614 		return 0;
1615 
1616 	if (unlikely(s != slab->slab_cache)) {
1617 		if (!folio_test_slab(slab_folio(slab))) {
1618 			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
1619 				 object);
1620 		} else if (!slab->slab_cache) {
1621 			pr_err("SLUB <none>: no slab for object 0x%p.\n",
1622 			       object);
1623 			dump_stack();
1624 		} else
1625 			object_err(s, slab, object,
1626 					"page slab pointer corrupt.");
1627 		return 0;
1628 	}
1629 	return 1;
1630 }
1631 
1632 /*
1633  * Parse a block of slab_debug options. Blocks are delimited by ';'
1634  *
1635  * @str:    start of block
1636  * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1637  * @slabs:  return start of list of slabs, or NULL when there's no list
1638  * @init:   assume this is initial parsing and not per-kmem-create parsing
1639  *
1640  * returns the start of next block if there's any, or NULL
1641  */
1642 static char *
1643 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
1644 {
1645 	bool higher_order_disable = false;
1646 
1647 	/* Skip any completely empty blocks */
1648 	while (*str && *str == ';')
1649 		str++;
1650 
1651 	if (*str == ',') {
1652 		/*
1653 		 * No options but restriction on slabs. This means full
1654 		 * debugging for slabs matching a pattern.
1655 		 */
1656 		*flags = DEBUG_DEFAULT_FLAGS;
1657 		goto check_slabs;
1658 	}
1659 	*flags = 0;
1660 
1661 	/* Determine which debug features should be switched on */
1662 	for (; *str && *str != ',' && *str != ';'; str++) {
1663 		switch (tolower(*str)) {
1664 		case '-':
1665 			*flags = 0;
1666 			break;
1667 		case 'f':
1668 			*flags |= SLAB_CONSISTENCY_CHECKS;
1669 			break;
1670 		case 'z':
1671 			*flags |= SLAB_RED_ZONE;
1672 			break;
1673 		case 'p':
1674 			*flags |= SLAB_POISON;
1675 			break;
1676 		case 'u':
1677 			*flags |= SLAB_STORE_USER;
1678 			break;
1679 		case 't':
1680 			*flags |= SLAB_TRACE;
1681 			break;
1682 		case 'a':
1683 			*flags |= SLAB_FAILSLAB;
1684 			break;
1685 		case 'o':
1686 			/*
1687 			 * Avoid enabling debugging on caches if its minimum
1688 			 * order would increase as a result.
1689 			 */
1690 			higher_order_disable = true;
1691 			break;
1692 		default:
1693 			if (init)
1694 				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
1695 		}
1696 	}
1697 check_slabs:
1698 	if (*str == ',')
1699 		*slabs = ++str;
1700 	else
1701 		*slabs = NULL;
1702 
1703 	/* Skip over the slab list */
1704 	while (*str && *str != ';')
1705 		str++;
1706 
1707 	/* Skip any completely empty blocks */
1708 	while (*str && *str == ';')
1709 		str++;
1710 
1711 	if (init && higher_order_disable)
1712 		disable_higher_order_debug = 1;
1713 
1714 	if (*str)
1715 		return str;
1716 	else
1717 		return NULL;
1718 }
1719 
1720 static int __init setup_slub_debug(char *str)
1721 {
1722 	slab_flags_t flags;
1723 	slab_flags_t global_flags;
1724 	char *saved_str;
1725 	char *slab_list;
1726 	bool global_slub_debug_changed = false;
1727 	bool slab_list_specified = false;
1728 
1729 	global_flags = DEBUG_DEFAULT_FLAGS;
1730 	if (*str++ != '=' || !*str)
1731 		/*
1732 		 * No options specified. Switch on full debugging.
1733 		 */
1734 		goto out;
1735 
1736 	saved_str = str;
1737 	while (str) {
1738 		str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1739 
1740 		if (!slab_list) {
1741 			global_flags = flags;
1742 			global_slub_debug_changed = true;
1743 		} else {
1744 			slab_list_specified = true;
1745 			if (flags & SLAB_STORE_USER)
1746 				stack_depot_request_early_init();
1747 		}
1748 	}
1749 
1750 	/*
1751 	 * For backwards compatibility, a single list of flags with list of
1752 	 * slabs means debugging is only changed for those slabs, so the global
1753 	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1754 	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
1755 	 * long as there is no option specifying flags without a slab list.
1756 	 */
1757 	if (slab_list_specified) {
1758 		if (!global_slub_debug_changed)
1759 			global_flags = slub_debug;
1760 		slub_debug_string = saved_str;
1761 	}
1762 out:
1763 	slub_debug = global_flags;
1764 	if (slub_debug & SLAB_STORE_USER)
1765 		stack_depot_request_early_init();
1766 	if (slub_debug != 0 || slub_debug_string)
1767 		static_branch_enable(&slub_debug_enabled);
1768 	else
1769 		static_branch_disable(&slub_debug_enabled);
1770 	if ((static_branch_unlikely(&init_on_alloc) ||
1771 	     static_branch_unlikely(&init_on_free)) &&
1772 	    (slub_debug & SLAB_POISON))
1773 		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1774 	return 1;
1775 }
1776 
1777 __setup("slab_debug", setup_slub_debug);
1778 __setup_param("slub_debug", slub_debug, setup_slub_debug, 0);
1779 
1780 /*
1781  * kmem_cache_flags - apply debugging options to the cache
1782  * @flags:		flags to set
1783  * @name:		name of the cache
1784  *
1785  * Debug option(s) are applied to @flags. In addition to the debug
1786  * option(s), if a slab name (or multiple) is specified i.e.
1787  * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1788  * then only the select slabs will receive the debug option(s).
1789  */
1790 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1791 {
1792 	char *iter;
1793 	size_t len;
1794 	char *next_block;
1795 	slab_flags_t block_flags;
1796 	slab_flags_t slub_debug_local = slub_debug;
1797 
1798 	if (flags & SLAB_NO_USER_FLAGS)
1799 		return flags;
1800 
1801 	/*
1802 	 * If the slab cache is for debugging (e.g. kmemleak) then
1803 	 * don't store user (stack trace) information by default,
1804 	 * but let the user enable it via the command line below.
1805 	 */
1806 	if (flags & SLAB_NOLEAKTRACE)
1807 		slub_debug_local &= ~SLAB_STORE_USER;
1808 
1809 	len = strlen(name);
1810 	next_block = slub_debug_string;
1811 	/* Go through all blocks of debug options, see if any matches our slab's name */
1812 	while (next_block) {
1813 		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1814 		if (!iter)
1815 			continue;
1816 		/* Found a block that has a slab list, search it */
1817 		while (*iter) {
1818 			char *end, *glob;
1819 			size_t cmplen;
1820 
1821 			end = strchrnul(iter, ',');
1822 			if (next_block && next_block < end)
1823 				end = next_block - 1;
1824 
1825 			glob = strnchr(iter, end - iter, '*');
1826 			if (glob)
1827 				cmplen = glob - iter;
1828 			else
1829 				cmplen = max_t(size_t, len, (end - iter));
1830 
1831 			if (!strncmp(name, iter, cmplen)) {
1832 				flags |= block_flags;
1833 				return flags;
1834 			}
1835 
1836 			if (!*end || *end == ';')
1837 				break;
1838 			iter = end + 1;
1839 		}
1840 	}
1841 
1842 	return flags | slub_debug_local;
1843 }
1844 #else /* !CONFIG_SLUB_DEBUG */
1845 static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
1846 static inline
1847 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
1848 
1849 static inline bool alloc_debug_processing(struct kmem_cache *s,
1850 	struct slab *slab, void *object, int orig_size) { return true; }
1851 
1852 static inline bool free_debug_processing(struct kmem_cache *s,
1853 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
1854 	unsigned long addr, depot_stack_handle_t handle) { return true; }
1855 
1856 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
1857 static inline int check_object(struct kmem_cache *s, struct slab *slab,
1858 			void *object, u8 val) { return 1; }
1859 static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
1860 static inline void set_track(struct kmem_cache *s, void *object,
1861 			     enum track_item alloc, unsigned long addr) {}
1862 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1863 					struct slab *slab) {}
1864 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1865 					struct slab *slab) {}
1866 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1867 {
1868 	return flags;
1869 }
1870 #define slub_debug 0
1871 
1872 #define disable_higher_order_debug 0
1873 
1874 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1875 							{ return 0; }
1876 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1877 							int objects) {}
1878 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1879 							int objects) {}
1880 
1881 #ifndef CONFIG_SLUB_TINY
1882 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1883 			       void **freelist, void *nextfree)
1884 {
1885 	return false;
1886 }
1887 #endif
1888 #endif /* CONFIG_SLUB_DEBUG */
1889 
1890 #ifdef CONFIG_SLAB_OBJ_EXT
1891 
1892 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
1893 
1894 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
1895 {
1896 	struct slabobj_ext *slab_exts;
1897 	struct slab *obj_exts_slab;
1898 
1899 	obj_exts_slab = virt_to_slab(obj_exts);
1900 	slab_exts = slab_obj_exts(obj_exts_slab);
1901 	if (slab_exts) {
1902 		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
1903 						 obj_exts_slab, obj_exts);
1904 		/* codetag should be NULL */
1905 		WARN_ON(slab_exts[offs].ref.ct);
1906 		set_codetag_empty(&slab_exts[offs].ref);
1907 	}
1908 }
1909 
1910 static inline void mark_failed_objexts_alloc(struct slab *slab)
1911 {
1912 	slab->obj_exts = OBJEXTS_ALLOC_FAIL;
1913 }
1914 
1915 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
1916 			struct slabobj_ext *vec, unsigned int objects)
1917 {
1918 	/*
1919 	 * If vector previously failed to allocate then we have live
1920 	 * objects with no tag reference. Mark all references in this
1921 	 * vector as empty to avoid warnings later on.
1922 	 */
1923 	if (obj_exts & OBJEXTS_ALLOC_FAIL) {
1924 		unsigned int i;
1925 
1926 		for (i = 0; i < objects; i++)
1927 			set_codetag_empty(&vec[i].ref);
1928 	}
1929 }
1930 
1931 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1932 
1933 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
1934 static inline void mark_failed_objexts_alloc(struct slab *slab) {}
1935 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
1936 			struct slabobj_ext *vec, unsigned int objects) {}
1937 
1938 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1939 
1940 /*
1941  * The allocated objcg pointers array is not accounted directly.
1942  * Moreover, it should not come from DMA buffer and is not readily
1943  * reclaimable. So those GFP bits should be masked off.
1944  */
1945 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
1946 				__GFP_ACCOUNT | __GFP_NOFAIL)
1947 
1948 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
1949 		        gfp_t gfp, bool new_slab)
1950 {
1951 	unsigned int objects = objs_per_slab(s, slab);
1952 	unsigned long new_exts;
1953 	unsigned long old_exts;
1954 	struct slabobj_ext *vec;
1955 
1956 	gfp &= ~OBJCGS_CLEAR_MASK;
1957 	/* Prevent recursive extension vector allocation */
1958 	gfp |= __GFP_NO_OBJ_EXT;
1959 	vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
1960 			   slab_nid(slab));
1961 	if (!vec) {
1962 		/* Mark vectors which failed to allocate */
1963 		if (new_slab)
1964 			mark_failed_objexts_alloc(slab);
1965 
1966 		return -ENOMEM;
1967 	}
1968 
1969 	new_exts = (unsigned long)vec;
1970 #ifdef CONFIG_MEMCG
1971 	new_exts |= MEMCG_DATA_OBJEXTS;
1972 #endif
1973 	old_exts = READ_ONCE(slab->obj_exts);
1974 	handle_failed_objexts_alloc(old_exts, vec, objects);
1975 	if (new_slab) {
1976 		/*
1977 		 * If the slab is brand new and nobody can yet access its
1978 		 * obj_exts, no synchronization is required and obj_exts can
1979 		 * be simply assigned.
1980 		 */
1981 		slab->obj_exts = new_exts;
1982 	} else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
1983 		   cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
1984 		/*
1985 		 * If the slab is already in use, somebody can allocate and
1986 		 * assign slabobj_exts in parallel. In this case the existing
1987 		 * objcg vector should be reused.
1988 		 */
1989 		mark_objexts_empty(vec);
1990 		kfree(vec);
1991 		return 0;
1992 	}
1993 
1994 	kmemleak_not_leak(vec);
1995 	return 0;
1996 }
1997 
1998 static inline void free_slab_obj_exts(struct slab *slab)
1999 {
2000 	struct slabobj_ext *obj_exts;
2001 
2002 	obj_exts = slab_obj_exts(slab);
2003 	if (!obj_exts)
2004 		return;
2005 
2006 	/*
2007 	 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its
2008 	 * corresponding extension will be NULL. alloc_tag_sub() will throw a
2009 	 * warning if slab has extensions but the extension of an object is
2010 	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
2011 	 * the extension for obj_exts is expected to be NULL.
2012 	 */
2013 	mark_objexts_empty(obj_exts);
2014 	kfree(obj_exts);
2015 	slab->obj_exts = 0;
2016 }
2017 
2018 static inline bool need_slab_obj_ext(void)
2019 {
2020 	if (mem_alloc_profiling_enabled())
2021 		return true;
2022 
2023 	/*
2024 	 * CONFIG_MEMCG_KMEM creates vector of obj_cgroup objects conditionally
2025 	 * inside memcg_slab_post_alloc_hook. No other users for now.
2026 	 */
2027 	return false;
2028 }
2029 
2030 #else /* CONFIG_SLAB_OBJ_EXT */
2031 
2032 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2033 			       gfp_t gfp, bool new_slab)
2034 {
2035 	return 0;
2036 }
2037 
2038 static inline void free_slab_obj_exts(struct slab *slab)
2039 {
2040 }
2041 
2042 static inline bool need_slab_obj_ext(void)
2043 {
2044 	return false;
2045 }
2046 
2047 #endif /* CONFIG_SLAB_OBJ_EXT */
2048 
2049 #ifdef CONFIG_MEM_ALLOC_PROFILING
2050 
2051 static inline struct slabobj_ext *
2052 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
2053 {
2054 	struct slab *slab;
2055 
2056 	if (!p)
2057 		return NULL;
2058 
2059 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2060 		return NULL;
2061 
2062 	if (flags & __GFP_NO_OBJ_EXT)
2063 		return NULL;
2064 
2065 	slab = virt_to_slab(p);
2066 	if (!slab_obj_exts(slab) &&
2067 	    WARN(alloc_slab_obj_exts(slab, s, flags, false),
2068 		 "%s, %s: Failed to create slab extension vector!\n",
2069 		 __func__, s->name))
2070 		return NULL;
2071 
2072 	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
2073 }
2074 
2075 static inline void
2076 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2077 {
2078 	if (need_slab_obj_ext()) {
2079 		struct slabobj_ext *obj_exts;
2080 
2081 		obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
2082 		/*
2083 		 * Currently obj_exts is used only for allocation profiling.
2084 		 * If other users appear then mem_alloc_profiling_enabled()
2085 		 * check should be added before alloc_tag_add().
2086 		 */
2087 		if (likely(obj_exts))
2088 			alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
2089 	}
2090 }
2091 
2092 static inline void
2093 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2094 			     int objects)
2095 {
2096 	struct slabobj_ext *obj_exts;
2097 	int i;
2098 
2099 	if (!mem_alloc_profiling_enabled())
2100 		return;
2101 
2102 	obj_exts = slab_obj_exts(slab);
2103 	if (!obj_exts)
2104 		return;
2105 
2106 	for (i = 0; i < objects; i++) {
2107 		unsigned int off = obj_to_index(s, slab, p[i]);
2108 
2109 		alloc_tag_sub(&obj_exts[off].ref, s->size);
2110 	}
2111 }
2112 
2113 #else /* CONFIG_MEM_ALLOC_PROFILING */
2114 
2115 static inline void
2116 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2117 {
2118 }
2119 
2120 static inline void
2121 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2122 			     int objects)
2123 {
2124 }
2125 
2126 #endif /* CONFIG_MEM_ALLOC_PROFILING */
2127 
2128 
2129 #ifdef CONFIG_MEMCG_KMEM
2130 
2131 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
2132 
2133 static __fastpath_inline
2134 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2135 				gfp_t flags, size_t size, void **p)
2136 {
2137 	if (likely(!memcg_kmem_online()))
2138 		return true;
2139 
2140 	if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
2141 		return true;
2142 
2143 	if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
2144 		return true;
2145 
2146 	if (likely(size == 1)) {
2147 		memcg_alloc_abort_single(s, *p);
2148 		*p = NULL;
2149 	} else {
2150 		kmem_cache_free_bulk(s, size, p);
2151 	}
2152 
2153 	return false;
2154 }
2155 
2156 static __fastpath_inline
2157 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2158 			  int objects)
2159 {
2160 	struct slabobj_ext *obj_exts;
2161 
2162 	if (!memcg_kmem_online())
2163 		return;
2164 
2165 	obj_exts = slab_obj_exts(slab);
2166 	if (likely(!obj_exts))
2167 		return;
2168 
2169 	__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
2170 }
2171 #else /* CONFIG_MEMCG_KMEM */
2172 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
2173 					      struct list_lru *lru,
2174 					      gfp_t flags, size_t size,
2175 					      void **p)
2176 {
2177 	return true;
2178 }
2179 
2180 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2181 					void **p, int objects)
2182 {
2183 }
2184 #endif /* CONFIG_MEMCG_KMEM */
2185 
2186 /*
2187  * Hooks for other subsystems that check memory allocations. In a typical
2188  * production configuration these hooks all should produce no code at all.
2189  *
2190  * Returns true if freeing of the object can proceed, false if its reuse
2191  * was delayed by KASAN quarantine, or it was returned to KFENCE.
2192  */
2193 static __always_inline
2194 bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
2195 {
2196 	kmemleak_free_recursive(x, s->flags);
2197 	kmsan_slab_free(s, x);
2198 
2199 	debug_check_no_locks_freed(x, s->object_size);
2200 
2201 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
2202 		debug_check_no_obj_freed(x, s->object_size);
2203 
2204 	/* Use KCSAN to help debug racy use-after-free. */
2205 	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
2206 		__kcsan_check_access(x, s->object_size,
2207 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
2208 
2209 	if (kfence_free(x))
2210 		return false;
2211 
2212 	/*
2213 	 * As memory initialization might be integrated into KASAN,
2214 	 * kasan_slab_free and initialization memset's must be
2215 	 * kept together to avoid discrepancies in behavior.
2216 	 *
2217 	 * The initialization memset's clear the object and the metadata,
2218 	 * but don't touch the SLAB redzone.
2219 	 *
2220 	 * The object's freepointer is also avoided if stored outside the
2221 	 * object.
2222 	 */
2223 	if (unlikely(init)) {
2224 		int rsize;
2225 		unsigned int inuse;
2226 
2227 		inuse = get_info_end(s);
2228 		if (!kasan_has_integrated_init())
2229 			memset(kasan_reset_tag(x), 0, s->object_size);
2230 		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2231 		memset((char *)kasan_reset_tag(x) + inuse, 0,
2232 		       s->size - inuse - rsize);
2233 	}
2234 	/* KASAN might put x into memory quarantine, delaying its reuse. */
2235 	return !kasan_slab_free(s, x, init);
2236 }
2237 
2238 static __fastpath_inline
2239 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
2240 			     int *cnt)
2241 {
2242 
2243 	void *object;
2244 	void *next = *head;
2245 	void *old_tail = *tail;
2246 	bool init;
2247 
2248 	if (is_kfence_address(next)) {
2249 		slab_free_hook(s, next, false);
2250 		return false;
2251 	}
2252 
2253 	/* Head and tail of the reconstructed freelist */
2254 	*head = NULL;
2255 	*tail = NULL;
2256 
2257 	init = slab_want_init_on_free(s);
2258 
2259 	do {
2260 		object = next;
2261 		next = get_freepointer(s, object);
2262 
2263 		/* If object's reuse doesn't have to be delayed */
2264 		if (likely(slab_free_hook(s, object, init))) {
2265 			/* Move object to the new freelist */
2266 			set_freepointer(s, object, *head);
2267 			*head = object;
2268 			if (!*tail)
2269 				*tail = object;
2270 		} else {
2271 			/*
2272 			 * Adjust the reconstructed freelist depth
2273 			 * accordingly if object's reuse is delayed.
2274 			 */
2275 			--(*cnt);
2276 		}
2277 	} while (object != old_tail);
2278 
2279 	return *head != NULL;
2280 }
2281 
2282 static void *setup_object(struct kmem_cache *s, void *object)
2283 {
2284 	setup_object_debug(s, object);
2285 	object = kasan_init_slab_obj(s, object);
2286 	if (unlikely(s->ctor)) {
2287 		kasan_unpoison_new_object(s, object);
2288 		s->ctor(object);
2289 		kasan_poison_new_object(s, object);
2290 	}
2291 	return object;
2292 }
2293 
2294 /*
2295  * Slab allocation and freeing
2296  */
2297 static inline struct slab *alloc_slab_page(gfp_t flags, int node,
2298 		struct kmem_cache_order_objects oo)
2299 {
2300 	struct folio *folio;
2301 	struct slab *slab;
2302 	unsigned int order = oo_order(oo);
2303 
2304 	folio = (struct folio *)alloc_pages_node(node, flags, order);
2305 	if (!folio)
2306 		return NULL;
2307 
2308 	slab = folio_slab(folio);
2309 	__folio_set_slab(folio);
2310 	/* Make the flag visible before any changes to folio->mapping */
2311 	smp_wmb();
2312 	if (folio_is_pfmemalloc(folio))
2313 		slab_set_pfmemalloc(slab);
2314 
2315 	return slab;
2316 }
2317 
2318 #ifdef CONFIG_SLAB_FREELIST_RANDOM
2319 /* Pre-initialize the random sequence cache */
2320 static int init_cache_random_seq(struct kmem_cache *s)
2321 {
2322 	unsigned int count = oo_objects(s->oo);
2323 	int err;
2324 
2325 	/* Bailout if already initialised */
2326 	if (s->random_seq)
2327 		return 0;
2328 
2329 	err = cache_random_seq_create(s, count, GFP_KERNEL);
2330 	if (err) {
2331 		pr_err("SLUB: Unable to initialize free list for %s\n",
2332 			s->name);
2333 		return err;
2334 	}
2335 
2336 	/* Transform to an offset on the set of pages */
2337 	if (s->random_seq) {
2338 		unsigned int i;
2339 
2340 		for (i = 0; i < count; i++)
2341 			s->random_seq[i] *= s->size;
2342 	}
2343 	return 0;
2344 }
2345 
2346 /* Initialize each random sequence freelist per cache */
2347 static void __init init_freelist_randomization(void)
2348 {
2349 	struct kmem_cache *s;
2350 
2351 	mutex_lock(&slab_mutex);
2352 
2353 	list_for_each_entry(s, &slab_caches, list)
2354 		init_cache_random_seq(s);
2355 
2356 	mutex_unlock(&slab_mutex);
2357 }
2358 
2359 /* Get the next entry on the pre-computed freelist randomized */
2360 static void *next_freelist_entry(struct kmem_cache *s,
2361 				unsigned long *pos, void *start,
2362 				unsigned long page_limit,
2363 				unsigned long freelist_count)
2364 {
2365 	unsigned int idx;
2366 
2367 	/*
2368 	 * If the target page allocation failed, the number of objects on the
2369 	 * page might be smaller than the usual size defined by the cache.
2370 	 */
2371 	do {
2372 		idx = s->random_seq[*pos];
2373 		*pos += 1;
2374 		if (*pos >= freelist_count)
2375 			*pos = 0;
2376 	} while (unlikely(idx >= page_limit));
2377 
2378 	return (char *)start + idx;
2379 }
2380 
2381 /* Shuffle the single linked freelist based on a random pre-computed sequence */
2382 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2383 {
2384 	void *start;
2385 	void *cur;
2386 	void *next;
2387 	unsigned long idx, pos, page_limit, freelist_count;
2388 
2389 	if (slab->objects < 2 || !s->random_seq)
2390 		return false;
2391 
2392 	freelist_count = oo_objects(s->oo);
2393 	pos = get_random_u32_below(freelist_count);
2394 
2395 	page_limit = slab->objects * s->size;
2396 	start = fixup_red_left(s, slab_address(slab));
2397 
2398 	/* First entry is used as the base of the freelist */
2399 	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
2400 	cur = setup_object(s, cur);
2401 	slab->freelist = cur;
2402 
2403 	for (idx = 1; idx < slab->objects; idx++) {
2404 		next = next_freelist_entry(s, &pos, start, page_limit,
2405 			freelist_count);
2406 		next = setup_object(s, next);
2407 		set_freepointer(s, cur, next);
2408 		cur = next;
2409 	}
2410 	set_freepointer(s, cur, NULL);
2411 
2412 	return true;
2413 }
2414 #else
2415 static inline int init_cache_random_seq(struct kmem_cache *s)
2416 {
2417 	return 0;
2418 }
2419 static inline void init_freelist_randomization(void) { }
2420 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2421 {
2422 	return false;
2423 }
2424 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
2425 
2426 static __always_inline void account_slab(struct slab *slab, int order,
2427 					 struct kmem_cache *s, gfp_t gfp)
2428 {
2429 	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
2430 		alloc_slab_obj_exts(slab, s, gfp, true);
2431 
2432 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2433 			    PAGE_SIZE << order);
2434 }
2435 
2436 static __always_inline void unaccount_slab(struct slab *slab, int order,
2437 					   struct kmem_cache *s)
2438 {
2439 	if (memcg_kmem_online() || need_slab_obj_ext())
2440 		free_slab_obj_exts(slab);
2441 
2442 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2443 			    -(PAGE_SIZE << order));
2444 }
2445 
2446 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
2447 {
2448 	struct slab *slab;
2449 	struct kmem_cache_order_objects oo = s->oo;
2450 	gfp_t alloc_gfp;
2451 	void *start, *p, *next;
2452 	int idx;
2453 	bool shuffle;
2454 
2455 	flags &= gfp_allowed_mask;
2456 
2457 	flags |= s->allocflags;
2458 
2459 	/*
2460 	 * Let the initial higher-order allocation fail under memory pressure
2461 	 * so we fall-back to the minimum order allocation.
2462 	 */
2463 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
2464 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
2465 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
2466 
2467 	slab = alloc_slab_page(alloc_gfp, node, oo);
2468 	if (unlikely(!slab)) {
2469 		oo = s->min;
2470 		alloc_gfp = flags;
2471 		/*
2472 		 * Allocation may have failed due to fragmentation.
2473 		 * Try a lower order alloc if possible
2474 		 */
2475 		slab = alloc_slab_page(alloc_gfp, node, oo);
2476 		if (unlikely(!slab))
2477 			return NULL;
2478 		stat(s, ORDER_FALLBACK);
2479 	}
2480 
2481 	slab->objects = oo_objects(oo);
2482 	slab->inuse = 0;
2483 	slab->frozen = 0;
2484 
2485 	account_slab(slab, oo_order(oo), s, flags);
2486 
2487 	slab->slab_cache = s;
2488 
2489 	kasan_poison_slab(slab);
2490 
2491 	start = slab_address(slab);
2492 
2493 	setup_slab_debug(s, slab, start);
2494 
2495 	shuffle = shuffle_freelist(s, slab);
2496 
2497 	if (!shuffle) {
2498 		start = fixup_red_left(s, start);
2499 		start = setup_object(s, start);
2500 		slab->freelist = start;
2501 		for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
2502 			next = p + s->size;
2503 			next = setup_object(s, next);
2504 			set_freepointer(s, p, next);
2505 			p = next;
2506 		}
2507 		set_freepointer(s, p, NULL);
2508 	}
2509 
2510 	return slab;
2511 }
2512 
2513 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
2514 {
2515 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
2516 		flags = kmalloc_fix_flags(flags);
2517 
2518 	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2519 
2520 	return allocate_slab(s,
2521 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
2522 }
2523 
2524 static void __free_slab(struct kmem_cache *s, struct slab *slab)
2525 {
2526 	struct folio *folio = slab_folio(slab);
2527 	int order = folio_order(folio);
2528 	int pages = 1 << order;
2529 
2530 	__slab_clear_pfmemalloc(slab);
2531 	folio->mapping = NULL;
2532 	/* Make the mapping reset visible before clearing the flag */
2533 	smp_wmb();
2534 	__folio_clear_slab(folio);
2535 	mm_account_reclaimed_pages(pages);
2536 	unaccount_slab(slab, order, s);
2537 	__free_pages(&folio->page, order);
2538 }
2539 
2540 static void rcu_free_slab(struct rcu_head *h)
2541 {
2542 	struct slab *slab = container_of(h, struct slab, rcu_head);
2543 
2544 	__free_slab(slab->slab_cache, slab);
2545 }
2546 
2547 static void free_slab(struct kmem_cache *s, struct slab *slab)
2548 {
2549 	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
2550 		void *p;
2551 
2552 		slab_pad_check(s, slab);
2553 		for_each_object(p, s, slab_address(slab), slab->objects)
2554 			check_object(s, slab, p, SLUB_RED_INACTIVE);
2555 	}
2556 
2557 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
2558 		call_rcu(&slab->rcu_head, rcu_free_slab);
2559 	else
2560 		__free_slab(s, slab);
2561 }
2562 
2563 static void discard_slab(struct kmem_cache *s, struct slab *slab)
2564 {
2565 	dec_slabs_node(s, slab_nid(slab), slab->objects);
2566 	free_slab(s, slab);
2567 }
2568 
2569 /*
2570  * SLUB reuses PG_workingset bit to keep track of whether it's on
2571  * the per-node partial list.
2572  */
2573 static inline bool slab_test_node_partial(const struct slab *slab)
2574 {
2575 	return folio_test_workingset(slab_folio(slab));
2576 }
2577 
2578 static inline void slab_set_node_partial(struct slab *slab)
2579 {
2580 	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2581 }
2582 
2583 static inline void slab_clear_node_partial(struct slab *slab)
2584 {
2585 	clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2586 }
2587 
2588 /*
2589  * Management of partially allocated slabs.
2590  */
2591 static inline void
2592 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
2593 {
2594 	n->nr_partial++;
2595 	if (tail == DEACTIVATE_TO_TAIL)
2596 		list_add_tail(&slab->slab_list, &n->partial);
2597 	else
2598 		list_add(&slab->slab_list, &n->partial);
2599 	slab_set_node_partial(slab);
2600 }
2601 
2602 static inline void add_partial(struct kmem_cache_node *n,
2603 				struct slab *slab, int tail)
2604 {
2605 	lockdep_assert_held(&n->list_lock);
2606 	__add_partial(n, slab, tail);
2607 }
2608 
2609 static inline void remove_partial(struct kmem_cache_node *n,
2610 					struct slab *slab)
2611 {
2612 	lockdep_assert_held(&n->list_lock);
2613 	list_del(&slab->slab_list);
2614 	slab_clear_node_partial(slab);
2615 	n->nr_partial--;
2616 }
2617 
2618 /*
2619  * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
2620  * slab from the n->partial list. Remove only a single object from the slab, do
2621  * the alloc_debug_processing() checks and leave the slab on the list, or move
2622  * it to full list if it was the last free object.
2623  */
2624 static void *alloc_single_from_partial(struct kmem_cache *s,
2625 		struct kmem_cache_node *n, struct slab *slab, int orig_size)
2626 {
2627 	void *object;
2628 
2629 	lockdep_assert_held(&n->list_lock);
2630 
2631 	object = slab->freelist;
2632 	slab->freelist = get_freepointer(s, object);
2633 	slab->inuse++;
2634 
2635 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
2636 		remove_partial(n, slab);
2637 		return NULL;
2638 	}
2639 
2640 	if (slab->inuse == slab->objects) {
2641 		remove_partial(n, slab);
2642 		add_full(s, n, slab);
2643 	}
2644 
2645 	return object;
2646 }
2647 
2648 /*
2649  * Called only for kmem_cache_debug() caches to allocate from a freshly
2650  * allocated slab. Allocate a single object instead of whole freelist
2651  * and put the slab to the partial (or full) list.
2652  */
2653 static void *alloc_single_from_new_slab(struct kmem_cache *s,
2654 					struct slab *slab, int orig_size)
2655 {
2656 	int nid = slab_nid(slab);
2657 	struct kmem_cache_node *n = get_node(s, nid);
2658 	unsigned long flags;
2659 	void *object;
2660 
2661 
2662 	object = slab->freelist;
2663 	slab->freelist = get_freepointer(s, object);
2664 	slab->inuse = 1;
2665 
2666 	if (!alloc_debug_processing(s, slab, object, orig_size))
2667 		/*
2668 		 * It's not really expected that this would fail on a
2669 		 * freshly allocated slab, but a concurrent memory
2670 		 * corruption in theory could cause that.
2671 		 */
2672 		return NULL;
2673 
2674 	spin_lock_irqsave(&n->list_lock, flags);
2675 
2676 	if (slab->inuse == slab->objects)
2677 		add_full(s, n, slab);
2678 	else
2679 		add_partial(n, slab, DEACTIVATE_TO_HEAD);
2680 
2681 	inc_slabs_node(s, nid, slab->objects);
2682 	spin_unlock_irqrestore(&n->list_lock, flags);
2683 
2684 	return object;
2685 }
2686 
2687 #ifdef CONFIG_SLUB_CPU_PARTIAL
2688 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
2689 #else
2690 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
2691 				   int drain) { }
2692 #endif
2693 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
2694 
2695 /*
2696  * Try to allocate a partial slab from a specific node.
2697  */
2698 static struct slab *get_partial_node(struct kmem_cache *s,
2699 				     struct kmem_cache_node *n,
2700 				     struct partial_context *pc)
2701 {
2702 	struct slab *slab, *slab2, *partial = NULL;
2703 	unsigned long flags;
2704 	unsigned int partial_slabs = 0;
2705 
2706 	/*
2707 	 * Racy check. If we mistakenly see no partial slabs then we
2708 	 * just allocate an empty slab. If we mistakenly try to get a
2709 	 * partial slab and there is none available then get_partial()
2710 	 * will return NULL.
2711 	 */
2712 	if (!n || !n->nr_partial)
2713 		return NULL;
2714 
2715 	spin_lock_irqsave(&n->list_lock, flags);
2716 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
2717 		if (!pfmemalloc_match(slab, pc->flags))
2718 			continue;
2719 
2720 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
2721 			void *object = alloc_single_from_partial(s, n, slab,
2722 							pc->orig_size);
2723 			if (object) {
2724 				partial = slab;
2725 				pc->object = object;
2726 				break;
2727 			}
2728 			continue;
2729 		}
2730 
2731 		remove_partial(n, slab);
2732 
2733 		if (!partial) {
2734 			partial = slab;
2735 			stat(s, ALLOC_FROM_PARTIAL);
2736 
2737 			if ((slub_get_cpu_partial(s) == 0)) {
2738 				break;
2739 			}
2740 		} else {
2741 			put_cpu_partial(s, slab, 0);
2742 			stat(s, CPU_PARTIAL_NODE);
2743 
2744 			if (++partial_slabs > slub_get_cpu_partial(s) / 2) {
2745 				break;
2746 			}
2747 		}
2748 	}
2749 	spin_unlock_irqrestore(&n->list_lock, flags);
2750 	return partial;
2751 }
2752 
2753 /*
2754  * Get a slab from somewhere. Search in increasing NUMA distances.
2755  */
2756 static struct slab *get_any_partial(struct kmem_cache *s,
2757 				    struct partial_context *pc)
2758 {
2759 #ifdef CONFIG_NUMA
2760 	struct zonelist *zonelist;
2761 	struct zoneref *z;
2762 	struct zone *zone;
2763 	enum zone_type highest_zoneidx = gfp_zone(pc->flags);
2764 	struct slab *slab;
2765 	unsigned int cpuset_mems_cookie;
2766 
2767 	/*
2768 	 * The defrag ratio allows a configuration of the tradeoffs between
2769 	 * inter node defragmentation and node local allocations. A lower
2770 	 * defrag_ratio increases the tendency to do local allocations
2771 	 * instead of attempting to obtain partial slabs from other nodes.
2772 	 *
2773 	 * If the defrag_ratio is set to 0 then kmalloc() always
2774 	 * returns node local objects. If the ratio is higher then kmalloc()
2775 	 * may return off node objects because partial slabs are obtained
2776 	 * from other nodes and filled up.
2777 	 *
2778 	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2779 	 * (which makes defrag_ratio = 1000) then every (well almost)
2780 	 * allocation will first attempt to defrag slab caches on other nodes.
2781 	 * This means scanning over all nodes to look for partial slabs which
2782 	 * may be expensive if we do it every time we are trying to find a slab
2783 	 * with available objects.
2784 	 */
2785 	if (!s->remote_node_defrag_ratio ||
2786 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
2787 		return NULL;
2788 
2789 	do {
2790 		cpuset_mems_cookie = read_mems_allowed_begin();
2791 		zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
2792 		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
2793 			struct kmem_cache_node *n;
2794 
2795 			n = get_node(s, zone_to_nid(zone));
2796 
2797 			if (n && cpuset_zone_allowed(zone, pc->flags) &&
2798 					n->nr_partial > s->min_partial) {
2799 				slab = get_partial_node(s, n, pc);
2800 				if (slab) {
2801 					/*
2802 					 * Don't check read_mems_allowed_retry()
2803 					 * here - if mems_allowed was updated in
2804 					 * parallel, that was a harmless race
2805 					 * between allocation and the cpuset
2806 					 * update
2807 					 */
2808 					return slab;
2809 				}
2810 			}
2811 		}
2812 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
2813 #endif	/* CONFIG_NUMA */
2814 	return NULL;
2815 }
2816 
2817 /*
2818  * Get a partial slab, lock it and return it.
2819  */
2820 static struct slab *get_partial(struct kmem_cache *s, int node,
2821 				struct partial_context *pc)
2822 {
2823 	struct slab *slab;
2824 	int searchnode = node;
2825 
2826 	if (node == NUMA_NO_NODE)
2827 		searchnode = numa_mem_id();
2828 
2829 	slab = get_partial_node(s, get_node(s, searchnode), pc);
2830 	if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
2831 		return slab;
2832 
2833 	return get_any_partial(s, pc);
2834 }
2835 
2836 #ifndef CONFIG_SLUB_TINY
2837 
2838 #ifdef CONFIG_PREEMPTION
2839 /*
2840  * Calculate the next globally unique transaction for disambiguation
2841  * during cmpxchg. The transactions start with the cpu number and are then
2842  * incremented by CONFIG_NR_CPUS.
2843  */
2844 #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
2845 #else
2846 /*
2847  * No preemption supported therefore also no need to check for
2848  * different cpus.
2849  */
2850 #define TID_STEP 1
2851 #endif /* CONFIG_PREEMPTION */
2852 
2853 static inline unsigned long next_tid(unsigned long tid)
2854 {
2855 	return tid + TID_STEP;
2856 }
2857 
2858 #ifdef SLUB_DEBUG_CMPXCHG
2859 static inline unsigned int tid_to_cpu(unsigned long tid)
2860 {
2861 	return tid % TID_STEP;
2862 }
2863 
2864 static inline unsigned long tid_to_event(unsigned long tid)
2865 {
2866 	return tid / TID_STEP;
2867 }
2868 #endif
2869 
2870 static inline unsigned int init_tid(int cpu)
2871 {
2872 	return cpu;
2873 }
2874 
2875 static inline void note_cmpxchg_failure(const char *n,
2876 		const struct kmem_cache *s, unsigned long tid)
2877 {
2878 #ifdef SLUB_DEBUG_CMPXCHG
2879 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2880 
2881 	pr_info("%s %s: cmpxchg redo ", n, s->name);
2882 
2883 #ifdef CONFIG_PREEMPTION
2884 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2885 		pr_warn("due to cpu change %d -> %d\n",
2886 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
2887 	else
2888 #endif
2889 	if (tid_to_event(tid) != tid_to_event(actual_tid))
2890 		pr_warn("due to cpu running other code. Event %ld->%ld\n",
2891 			tid_to_event(tid), tid_to_event(actual_tid));
2892 	else
2893 		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2894 			actual_tid, tid, next_tid(tid));
2895 #endif
2896 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2897 }
2898 
2899 static void init_kmem_cache_cpus(struct kmem_cache *s)
2900 {
2901 	int cpu;
2902 	struct kmem_cache_cpu *c;
2903 
2904 	for_each_possible_cpu(cpu) {
2905 		c = per_cpu_ptr(s->cpu_slab, cpu);
2906 		local_lock_init(&c->lock);
2907 		c->tid = init_tid(cpu);
2908 	}
2909 }
2910 
2911 /*
2912  * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
2913  * unfreezes the slabs and puts it on the proper list.
2914  * Assumes the slab has been already safely taken away from kmem_cache_cpu
2915  * by the caller.
2916  */
2917 static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
2918 			    void *freelist)
2919 {
2920 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
2921 	int free_delta = 0;
2922 	void *nextfree, *freelist_iter, *freelist_tail;
2923 	int tail = DEACTIVATE_TO_HEAD;
2924 	unsigned long flags = 0;
2925 	struct slab new;
2926 	struct slab old;
2927 
2928 	if (READ_ONCE(slab->freelist)) {
2929 		stat(s, DEACTIVATE_REMOTE_FREES);
2930 		tail = DEACTIVATE_TO_TAIL;
2931 	}
2932 
2933 	/*
2934 	 * Stage one: Count the objects on cpu's freelist as free_delta and
2935 	 * remember the last object in freelist_tail for later splicing.
2936 	 */
2937 	freelist_tail = NULL;
2938 	freelist_iter = freelist;
2939 	while (freelist_iter) {
2940 		nextfree = get_freepointer(s, freelist_iter);
2941 
2942 		/*
2943 		 * If 'nextfree' is invalid, it is possible that the object at
2944 		 * 'freelist_iter' is already corrupted.  So isolate all objects
2945 		 * starting at 'freelist_iter' by skipping them.
2946 		 */
2947 		if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
2948 			break;
2949 
2950 		freelist_tail = freelist_iter;
2951 		free_delta++;
2952 
2953 		freelist_iter = nextfree;
2954 	}
2955 
2956 	/*
2957 	 * Stage two: Unfreeze the slab while splicing the per-cpu
2958 	 * freelist to the head of slab's freelist.
2959 	 */
2960 	do {
2961 		old.freelist = READ_ONCE(slab->freelist);
2962 		old.counters = READ_ONCE(slab->counters);
2963 		VM_BUG_ON(!old.frozen);
2964 
2965 		/* Determine target state of the slab */
2966 		new.counters = old.counters;
2967 		new.frozen = 0;
2968 		if (freelist_tail) {
2969 			new.inuse -= free_delta;
2970 			set_freepointer(s, freelist_tail, old.freelist);
2971 			new.freelist = freelist;
2972 		} else {
2973 			new.freelist = old.freelist;
2974 		}
2975 	} while (!slab_update_freelist(s, slab,
2976 		old.freelist, old.counters,
2977 		new.freelist, new.counters,
2978 		"unfreezing slab"));
2979 
2980 	/*
2981 	 * Stage three: Manipulate the slab list based on the updated state.
2982 	 */
2983 	if (!new.inuse && n->nr_partial >= s->min_partial) {
2984 		stat(s, DEACTIVATE_EMPTY);
2985 		discard_slab(s, slab);
2986 		stat(s, FREE_SLAB);
2987 	} else if (new.freelist) {
2988 		spin_lock_irqsave(&n->list_lock, flags);
2989 		add_partial(n, slab, tail);
2990 		spin_unlock_irqrestore(&n->list_lock, flags);
2991 		stat(s, tail);
2992 	} else {
2993 		stat(s, DEACTIVATE_FULL);
2994 	}
2995 }
2996 
2997 #ifdef CONFIG_SLUB_CPU_PARTIAL
2998 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
2999 {
3000 	struct kmem_cache_node *n = NULL, *n2 = NULL;
3001 	struct slab *slab, *slab_to_discard = NULL;
3002 	unsigned long flags = 0;
3003 
3004 	while (partial_slab) {
3005 		slab = partial_slab;
3006 		partial_slab = slab->next;
3007 
3008 		n2 = get_node(s, slab_nid(slab));
3009 		if (n != n2) {
3010 			if (n)
3011 				spin_unlock_irqrestore(&n->list_lock, flags);
3012 
3013 			n = n2;
3014 			spin_lock_irqsave(&n->list_lock, flags);
3015 		}
3016 
3017 		if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) {
3018 			slab->next = slab_to_discard;
3019 			slab_to_discard = slab;
3020 		} else {
3021 			add_partial(n, slab, DEACTIVATE_TO_TAIL);
3022 			stat(s, FREE_ADD_PARTIAL);
3023 		}
3024 	}
3025 
3026 	if (n)
3027 		spin_unlock_irqrestore(&n->list_lock, flags);
3028 
3029 	while (slab_to_discard) {
3030 		slab = slab_to_discard;
3031 		slab_to_discard = slab_to_discard->next;
3032 
3033 		stat(s, DEACTIVATE_EMPTY);
3034 		discard_slab(s, slab);
3035 		stat(s, FREE_SLAB);
3036 	}
3037 }
3038 
3039 /*
3040  * Put all the cpu partial slabs to the node partial list.
3041  */
3042 static void put_partials(struct kmem_cache *s)
3043 {
3044 	struct slab *partial_slab;
3045 	unsigned long flags;
3046 
3047 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3048 	partial_slab = this_cpu_read(s->cpu_slab->partial);
3049 	this_cpu_write(s->cpu_slab->partial, NULL);
3050 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3051 
3052 	if (partial_slab)
3053 		__put_partials(s, partial_slab);
3054 }
3055 
3056 static void put_partials_cpu(struct kmem_cache *s,
3057 			     struct kmem_cache_cpu *c)
3058 {
3059 	struct slab *partial_slab;
3060 
3061 	partial_slab = slub_percpu_partial(c);
3062 	c->partial = NULL;
3063 
3064 	if (partial_slab)
3065 		__put_partials(s, partial_slab);
3066 }
3067 
3068 /*
3069  * Put a slab into a partial slab slot if available.
3070  *
3071  * If we did not find a slot then simply move all the partials to the
3072  * per node partial list.
3073  */
3074 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
3075 {
3076 	struct slab *oldslab;
3077 	struct slab *slab_to_put = NULL;
3078 	unsigned long flags;
3079 	int slabs = 0;
3080 
3081 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3082 
3083 	oldslab = this_cpu_read(s->cpu_slab->partial);
3084 
3085 	if (oldslab) {
3086 		if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
3087 			/*
3088 			 * Partial array is full. Move the existing set to the
3089 			 * per node partial list. Postpone the actual unfreezing
3090 			 * outside of the critical section.
3091 			 */
3092 			slab_to_put = oldslab;
3093 			oldslab = NULL;
3094 		} else {
3095 			slabs = oldslab->slabs;
3096 		}
3097 	}
3098 
3099 	slabs++;
3100 
3101 	slab->slabs = slabs;
3102 	slab->next = oldslab;
3103 
3104 	this_cpu_write(s->cpu_slab->partial, slab);
3105 
3106 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3107 
3108 	if (slab_to_put) {
3109 		__put_partials(s, slab_to_put);
3110 		stat(s, CPU_PARTIAL_DRAIN);
3111 	}
3112 }
3113 
3114 #else	/* CONFIG_SLUB_CPU_PARTIAL */
3115 
3116 static inline void put_partials(struct kmem_cache *s) { }
3117 static inline void put_partials_cpu(struct kmem_cache *s,
3118 				    struct kmem_cache_cpu *c) { }
3119 
3120 #endif	/* CONFIG_SLUB_CPU_PARTIAL */
3121 
3122 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
3123 {
3124 	unsigned long flags;
3125 	struct slab *slab;
3126 	void *freelist;
3127 
3128 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3129 
3130 	slab = c->slab;
3131 	freelist = c->freelist;
3132 
3133 	c->slab = NULL;
3134 	c->freelist = NULL;
3135 	c->tid = next_tid(c->tid);
3136 
3137 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3138 
3139 	if (slab) {
3140 		deactivate_slab(s, slab, freelist);
3141 		stat(s, CPUSLAB_FLUSH);
3142 	}
3143 }
3144 
3145 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
3146 {
3147 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3148 	void *freelist = c->freelist;
3149 	struct slab *slab = c->slab;
3150 
3151 	c->slab = NULL;
3152 	c->freelist = NULL;
3153 	c->tid = next_tid(c->tid);
3154 
3155 	if (slab) {
3156 		deactivate_slab(s, slab, freelist);
3157 		stat(s, CPUSLAB_FLUSH);
3158 	}
3159 
3160 	put_partials_cpu(s, c);
3161 }
3162 
3163 struct slub_flush_work {
3164 	struct work_struct work;
3165 	struct kmem_cache *s;
3166 	bool skip;
3167 };
3168 
3169 /*
3170  * Flush cpu slab.
3171  *
3172  * Called from CPU work handler with migration disabled.
3173  */
3174 static void flush_cpu_slab(struct work_struct *w)
3175 {
3176 	struct kmem_cache *s;
3177 	struct kmem_cache_cpu *c;
3178 	struct slub_flush_work *sfw;
3179 
3180 	sfw = container_of(w, struct slub_flush_work, work);
3181 
3182 	s = sfw->s;
3183 	c = this_cpu_ptr(s->cpu_slab);
3184 
3185 	if (c->slab)
3186 		flush_slab(s, c);
3187 
3188 	put_partials(s);
3189 }
3190 
3191 static bool has_cpu_slab(int cpu, struct kmem_cache *s)
3192 {
3193 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3194 
3195 	return c->slab || slub_percpu_partial(c);
3196 }
3197 
3198 static DEFINE_MUTEX(flush_lock);
3199 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
3200 
3201 static void flush_all_cpus_locked(struct kmem_cache *s)
3202 {
3203 	struct slub_flush_work *sfw;
3204 	unsigned int cpu;
3205 
3206 	lockdep_assert_cpus_held();
3207 	mutex_lock(&flush_lock);
3208 
3209 	for_each_online_cpu(cpu) {
3210 		sfw = &per_cpu(slub_flush, cpu);
3211 		if (!has_cpu_slab(cpu, s)) {
3212 			sfw->skip = true;
3213 			continue;
3214 		}
3215 		INIT_WORK(&sfw->work, flush_cpu_slab);
3216 		sfw->skip = false;
3217 		sfw->s = s;
3218 		queue_work_on(cpu, flushwq, &sfw->work);
3219 	}
3220 
3221 	for_each_online_cpu(cpu) {
3222 		sfw = &per_cpu(slub_flush, cpu);
3223 		if (sfw->skip)
3224 			continue;
3225 		flush_work(&sfw->work);
3226 	}
3227 
3228 	mutex_unlock(&flush_lock);
3229 }
3230 
3231 static void flush_all(struct kmem_cache *s)
3232 {
3233 	cpus_read_lock();
3234 	flush_all_cpus_locked(s);
3235 	cpus_read_unlock();
3236 }
3237 
3238 /*
3239  * Use the cpu notifier to insure that the cpu slabs are flushed when
3240  * necessary.
3241  */
3242 static int slub_cpu_dead(unsigned int cpu)
3243 {
3244 	struct kmem_cache *s;
3245 
3246 	mutex_lock(&slab_mutex);
3247 	list_for_each_entry(s, &slab_caches, list)
3248 		__flush_cpu_slab(s, cpu);
3249 	mutex_unlock(&slab_mutex);
3250 	return 0;
3251 }
3252 
3253 #else /* CONFIG_SLUB_TINY */
3254 static inline void flush_all_cpus_locked(struct kmem_cache *s) { }
3255 static inline void flush_all(struct kmem_cache *s) { }
3256 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
3257 static inline int slub_cpu_dead(unsigned int cpu) { return 0; }
3258 #endif /* CONFIG_SLUB_TINY */
3259 
3260 /*
3261  * Check if the objects in a per cpu structure fit numa
3262  * locality expectations.
3263  */
3264 static inline int node_match(struct slab *slab, int node)
3265 {
3266 #ifdef CONFIG_NUMA
3267 	if (node != NUMA_NO_NODE && slab_nid(slab) != node)
3268 		return 0;
3269 #endif
3270 	return 1;
3271 }
3272 
3273 #ifdef CONFIG_SLUB_DEBUG
3274 static int count_free(struct slab *slab)
3275 {
3276 	return slab->objects - slab->inuse;
3277 }
3278 
3279 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
3280 {
3281 	return atomic_long_read(&n->total_objects);
3282 }
3283 
3284 /* Supports checking bulk free of a constructed freelist */
3285 static inline bool free_debug_processing(struct kmem_cache *s,
3286 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
3287 	unsigned long addr, depot_stack_handle_t handle)
3288 {
3289 	bool checks_ok = false;
3290 	void *object = head;
3291 	int cnt = 0;
3292 
3293 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3294 		if (!check_slab(s, slab))
3295 			goto out;
3296 	}
3297 
3298 	if (slab->inuse < *bulk_cnt) {
3299 		slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
3300 			 slab->inuse, *bulk_cnt);
3301 		goto out;
3302 	}
3303 
3304 next_object:
3305 
3306 	if (++cnt > *bulk_cnt)
3307 		goto out_cnt;
3308 
3309 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3310 		if (!free_consistency_checks(s, slab, object, addr))
3311 			goto out;
3312 	}
3313 
3314 	if (s->flags & SLAB_STORE_USER)
3315 		set_track_update(s, object, TRACK_FREE, addr, handle);
3316 	trace(s, slab, object, 0);
3317 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
3318 	init_object(s, object, SLUB_RED_INACTIVE);
3319 
3320 	/* Reached end of constructed freelist yet? */
3321 	if (object != tail) {
3322 		object = get_freepointer(s, object);
3323 		goto next_object;
3324 	}
3325 	checks_ok = true;
3326 
3327 out_cnt:
3328 	if (cnt != *bulk_cnt) {
3329 		slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
3330 			 *bulk_cnt, cnt);
3331 		*bulk_cnt = cnt;
3332 	}
3333 
3334 out:
3335 
3336 	if (!checks_ok)
3337 		slab_fix(s, "Object at 0x%p not freed", object);
3338 
3339 	return checks_ok;
3340 }
3341 #endif /* CONFIG_SLUB_DEBUG */
3342 
3343 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
3344 static unsigned long count_partial(struct kmem_cache_node *n,
3345 					int (*get_count)(struct slab *))
3346 {
3347 	unsigned long flags;
3348 	unsigned long x = 0;
3349 	struct slab *slab;
3350 
3351 	spin_lock_irqsave(&n->list_lock, flags);
3352 	list_for_each_entry(slab, &n->partial, slab_list)
3353 		x += get_count(slab);
3354 	spin_unlock_irqrestore(&n->list_lock, flags);
3355 	return x;
3356 }
3357 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
3358 
3359 #ifdef CONFIG_SLUB_DEBUG
3360 #define MAX_PARTIAL_TO_SCAN 10000
3361 
3362 static unsigned long count_partial_free_approx(struct kmem_cache_node *n)
3363 {
3364 	unsigned long flags;
3365 	unsigned long x = 0;
3366 	struct slab *slab;
3367 
3368 	spin_lock_irqsave(&n->list_lock, flags);
3369 	if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) {
3370 		list_for_each_entry(slab, &n->partial, slab_list)
3371 			x += slab->objects - slab->inuse;
3372 	} else {
3373 		/*
3374 		 * For a long list, approximate the total count of objects in
3375 		 * it to meet the limit on the number of slabs to scan.
3376 		 * Scan from both the list's head and tail for better accuracy.
3377 		 */
3378 		unsigned long scanned = 0;
3379 
3380 		list_for_each_entry(slab, &n->partial, slab_list) {
3381 			x += slab->objects - slab->inuse;
3382 			if (++scanned == MAX_PARTIAL_TO_SCAN / 2)
3383 				break;
3384 		}
3385 		list_for_each_entry_reverse(slab, &n->partial, slab_list) {
3386 			x += slab->objects - slab->inuse;
3387 			if (++scanned == MAX_PARTIAL_TO_SCAN)
3388 				break;
3389 		}
3390 		x = mult_frac(x, n->nr_partial, scanned);
3391 		x = min(x, node_nr_objs(n));
3392 	}
3393 	spin_unlock_irqrestore(&n->list_lock, flags);
3394 	return x;
3395 }
3396 
3397 static noinline void
3398 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
3399 {
3400 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
3401 				      DEFAULT_RATELIMIT_BURST);
3402 	int node;
3403 	struct kmem_cache_node *n;
3404 
3405 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
3406 		return;
3407 
3408 	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
3409 		nid, gfpflags, &gfpflags);
3410 	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
3411 		s->name, s->object_size, s->size, oo_order(s->oo),
3412 		oo_order(s->min));
3413 
3414 	if (oo_order(s->min) > get_order(s->object_size))
3415 		pr_warn("  %s debugging increased min order, use slab_debug=O to disable.\n",
3416 			s->name);
3417 
3418 	for_each_kmem_cache_node(s, node, n) {
3419 		unsigned long nr_slabs;
3420 		unsigned long nr_objs;
3421 		unsigned long nr_free;
3422 
3423 		nr_free  = count_partial_free_approx(n);
3424 		nr_slabs = node_nr_slabs(n);
3425 		nr_objs  = node_nr_objs(n);
3426 
3427 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
3428 			node, nr_slabs, nr_objs, nr_free);
3429 	}
3430 }
3431 #else /* CONFIG_SLUB_DEBUG */
3432 static inline void
3433 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
3434 #endif
3435 
3436 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
3437 {
3438 	if (unlikely(slab_test_pfmemalloc(slab)))
3439 		return gfp_pfmemalloc_allowed(gfpflags);
3440 
3441 	return true;
3442 }
3443 
3444 #ifndef CONFIG_SLUB_TINY
3445 static inline bool
3446 __update_cpu_freelist_fast(struct kmem_cache *s,
3447 			   void *freelist_old, void *freelist_new,
3448 			   unsigned long tid)
3449 {
3450 	freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
3451 	freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
3452 
3453 	return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
3454 					     &old.full, new.full);
3455 }
3456 
3457 /*
3458  * Check the slab->freelist and either transfer the freelist to the
3459  * per cpu freelist or deactivate the slab.
3460  *
3461  * The slab is still frozen if the return value is not NULL.
3462  *
3463  * If this function returns NULL then the slab has been unfrozen.
3464  */
3465 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
3466 {
3467 	struct slab new;
3468 	unsigned long counters;
3469 	void *freelist;
3470 
3471 	lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3472 
3473 	do {
3474 		freelist = slab->freelist;
3475 		counters = slab->counters;
3476 
3477 		new.counters = counters;
3478 
3479 		new.inuse = slab->objects;
3480 		new.frozen = freelist != NULL;
3481 
3482 	} while (!__slab_update_freelist(s, slab,
3483 		freelist, counters,
3484 		NULL, new.counters,
3485 		"get_freelist"));
3486 
3487 	return freelist;
3488 }
3489 
3490 /*
3491  * Freeze the partial slab and return the pointer to the freelist.
3492  */
3493 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
3494 {
3495 	struct slab new;
3496 	unsigned long counters;
3497 	void *freelist;
3498 
3499 	do {
3500 		freelist = slab->freelist;
3501 		counters = slab->counters;
3502 
3503 		new.counters = counters;
3504 		VM_BUG_ON(new.frozen);
3505 
3506 		new.inuse = slab->objects;
3507 		new.frozen = 1;
3508 
3509 	} while (!slab_update_freelist(s, slab,
3510 		freelist, counters,
3511 		NULL, new.counters,
3512 		"freeze_slab"));
3513 
3514 	return freelist;
3515 }
3516 
3517 /*
3518  * Slow path. The lockless freelist is empty or we need to perform
3519  * debugging duties.
3520  *
3521  * Processing is still very fast if new objects have been freed to the
3522  * regular freelist. In that case we simply take over the regular freelist
3523  * as the lockless freelist and zap the regular freelist.
3524  *
3525  * If that is not working then we fall back to the partial lists. We take the
3526  * first element of the freelist as the object to allocate now and move the
3527  * rest of the freelist to the lockless freelist.
3528  *
3529  * And if we were unable to get a new slab from the partial slab lists then
3530  * we need to allocate a new slab. This is the slowest path since it involves
3531  * a call to the page allocator and the setup of a new slab.
3532  *
3533  * Version of __slab_alloc to use when we know that preemption is
3534  * already disabled (which is the case for bulk allocation).
3535  */
3536 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3537 			  unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3538 {
3539 	void *freelist;
3540 	struct slab *slab;
3541 	unsigned long flags;
3542 	struct partial_context pc;
3543 	bool try_thisnode = true;
3544 
3545 	stat(s, ALLOC_SLOWPATH);
3546 
3547 reread_slab:
3548 
3549 	slab = READ_ONCE(c->slab);
3550 	if (!slab) {
3551 		/*
3552 		 * if the node is not online or has no normal memory, just
3553 		 * ignore the node constraint
3554 		 */
3555 		if (unlikely(node != NUMA_NO_NODE &&
3556 			     !node_isset(node, slab_nodes)))
3557 			node = NUMA_NO_NODE;
3558 		goto new_slab;
3559 	}
3560 
3561 	if (unlikely(!node_match(slab, node))) {
3562 		/*
3563 		 * same as above but node_match() being false already
3564 		 * implies node != NUMA_NO_NODE
3565 		 */
3566 		if (!node_isset(node, slab_nodes)) {
3567 			node = NUMA_NO_NODE;
3568 		} else {
3569 			stat(s, ALLOC_NODE_MISMATCH);
3570 			goto deactivate_slab;
3571 		}
3572 	}
3573 
3574 	/*
3575 	 * By rights, we should be searching for a slab page that was
3576 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
3577 	 * information when the page leaves the per-cpu allocator
3578 	 */
3579 	if (unlikely(!pfmemalloc_match(slab, gfpflags)))
3580 		goto deactivate_slab;
3581 
3582 	/* must check again c->slab in case we got preempted and it changed */
3583 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3584 	if (unlikely(slab != c->slab)) {
3585 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3586 		goto reread_slab;
3587 	}
3588 	freelist = c->freelist;
3589 	if (freelist)
3590 		goto load_freelist;
3591 
3592 	freelist = get_freelist(s, slab);
3593 
3594 	if (!freelist) {
3595 		c->slab = NULL;
3596 		c->tid = next_tid(c->tid);
3597 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3598 		stat(s, DEACTIVATE_BYPASS);
3599 		goto new_slab;
3600 	}
3601 
3602 	stat(s, ALLOC_REFILL);
3603 
3604 load_freelist:
3605 
3606 	lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3607 
3608 	/*
3609 	 * freelist is pointing to the list of objects to be used.
3610 	 * slab is pointing to the slab from which the objects are obtained.
3611 	 * That slab must be frozen for per cpu allocations to work.
3612 	 */
3613 	VM_BUG_ON(!c->slab->frozen);
3614 	c->freelist = get_freepointer(s, freelist);
3615 	c->tid = next_tid(c->tid);
3616 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3617 	return freelist;
3618 
3619 deactivate_slab:
3620 
3621 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3622 	if (slab != c->slab) {
3623 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3624 		goto reread_slab;
3625 	}
3626 	freelist = c->freelist;
3627 	c->slab = NULL;
3628 	c->freelist = NULL;
3629 	c->tid = next_tid(c->tid);
3630 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3631 	deactivate_slab(s, slab, freelist);
3632 
3633 new_slab:
3634 
3635 #ifdef CONFIG_SLUB_CPU_PARTIAL
3636 	while (slub_percpu_partial(c)) {
3637 		local_lock_irqsave(&s->cpu_slab->lock, flags);
3638 		if (unlikely(c->slab)) {
3639 			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3640 			goto reread_slab;
3641 		}
3642 		if (unlikely(!slub_percpu_partial(c))) {
3643 			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3644 			/* we were preempted and partial list got empty */
3645 			goto new_objects;
3646 		}
3647 
3648 		slab = slub_percpu_partial(c);
3649 		slub_set_percpu_partial(c, slab);
3650 
3651 		if (likely(node_match(slab, node) &&
3652 			   pfmemalloc_match(slab, gfpflags))) {
3653 			c->slab = slab;
3654 			freelist = get_freelist(s, slab);
3655 			VM_BUG_ON(!freelist);
3656 			stat(s, CPU_PARTIAL_ALLOC);
3657 			goto load_freelist;
3658 		}
3659 
3660 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3661 
3662 		slab->next = NULL;
3663 		__put_partials(s, slab);
3664 	}
3665 #endif
3666 
3667 new_objects:
3668 
3669 	pc.flags = gfpflags;
3670 	/*
3671 	 * When a preferred node is indicated but no __GFP_THISNODE
3672 	 *
3673 	 * 1) try to get a partial slab from target node only by having
3674 	 *    __GFP_THISNODE in pc.flags for get_partial()
3675 	 * 2) if 1) failed, try to allocate a new slab from target node with
3676 	 *    GPF_NOWAIT | __GFP_THISNODE opportunistically
3677 	 * 3) if 2) failed, retry with original gfpflags which will allow
3678 	 *    get_partial() try partial lists of other nodes before potentially
3679 	 *    allocating new page from other nodes
3680 	 */
3681 	if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
3682 		     && try_thisnode))
3683 		pc.flags = GFP_NOWAIT | __GFP_THISNODE;
3684 
3685 	pc.orig_size = orig_size;
3686 	slab = get_partial(s, node, &pc);
3687 	if (slab) {
3688 		if (kmem_cache_debug(s)) {
3689 			freelist = pc.object;
3690 			/*
3691 			 * For debug caches here we had to go through
3692 			 * alloc_single_from_partial() so just store the
3693 			 * tracking info and return the object.
3694 			 */
3695 			if (s->flags & SLAB_STORE_USER)
3696 				set_track(s, freelist, TRACK_ALLOC, addr);
3697 
3698 			return freelist;
3699 		}
3700 
3701 		freelist = freeze_slab(s, slab);
3702 		goto retry_load_slab;
3703 	}
3704 
3705 	slub_put_cpu_ptr(s->cpu_slab);
3706 	slab = new_slab(s, pc.flags, node);
3707 	c = slub_get_cpu_ptr(s->cpu_slab);
3708 
3709 	if (unlikely(!slab)) {
3710 		if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
3711 		    && try_thisnode) {
3712 			try_thisnode = false;
3713 			goto new_objects;
3714 		}
3715 		slab_out_of_memory(s, gfpflags, node);
3716 		return NULL;
3717 	}
3718 
3719 	stat(s, ALLOC_SLAB);
3720 
3721 	if (kmem_cache_debug(s)) {
3722 		freelist = alloc_single_from_new_slab(s, slab, orig_size);
3723 
3724 		if (unlikely(!freelist))
3725 			goto new_objects;
3726 
3727 		if (s->flags & SLAB_STORE_USER)
3728 			set_track(s, freelist, TRACK_ALLOC, addr);
3729 
3730 		return freelist;
3731 	}
3732 
3733 	/*
3734 	 * No other reference to the slab yet so we can
3735 	 * muck around with it freely without cmpxchg
3736 	 */
3737 	freelist = slab->freelist;
3738 	slab->freelist = NULL;
3739 	slab->inuse = slab->objects;
3740 	slab->frozen = 1;
3741 
3742 	inc_slabs_node(s, slab_nid(slab), slab->objects);
3743 
3744 	if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
3745 		/*
3746 		 * For !pfmemalloc_match() case we don't load freelist so that
3747 		 * we don't make further mismatched allocations easier.
3748 		 */
3749 		deactivate_slab(s, slab, get_freepointer(s, freelist));
3750 		return freelist;
3751 	}
3752 
3753 retry_load_slab:
3754 
3755 	local_lock_irqsave(&s->cpu_slab->lock, flags);
3756 	if (unlikely(c->slab)) {
3757 		void *flush_freelist = c->freelist;
3758 		struct slab *flush_slab = c->slab;
3759 
3760 		c->slab = NULL;
3761 		c->freelist = NULL;
3762 		c->tid = next_tid(c->tid);
3763 
3764 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3765 
3766 		deactivate_slab(s, flush_slab, flush_freelist);
3767 
3768 		stat(s, CPUSLAB_FLUSH);
3769 
3770 		goto retry_load_slab;
3771 	}
3772 	c->slab = slab;
3773 
3774 	goto load_freelist;
3775 }
3776 
3777 /*
3778  * A wrapper for ___slab_alloc() for contexts where preemption is not yet
3779  * disabled. Compensates for possible cpu changes by refetching the per cpu area
3780  * pointer.
3781  */
3782 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3783 			  unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3784 {
3785 	void *p;
3786 
3787 #ifdef CONFIG_PREEMPT_COUNT
3788 	/*
3789 	 * We may have been preempted and rescheduled on a different
3790 	 * cpu before disabling preemption. Need to reload cpu area
3791 	 * pointer.
3792 	 */
3793 	c = slub_get_cpu_ptr(s->cpu_slab);
3794 #endif
3795 
3796 	p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
3797 #ifdef CONFIG_PREEMPT_COUNT
3798 	slub_put_cpu_ptr(s->cpu_slab);
3799 #endif
3800 	return p;
3801 }
3802 
3803 static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
3804 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3805 {
3806 	struct kmem_cache_cpu *c;
3807 	struct slab *slab;
3808 	unsigned long tid;
3809 	void *object;
3810 
3811 redo:
3812 	/*
3813 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
3814 	 * enabled. We may switch back and forth between cpus while
3815 	 * reading from one cpu area. That does not matter as long
3816 	 * as we end up on the original cpu again when doing the cmpxchg.
3817 	 *
3818 	 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
3819 	 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
3820 	 * the tid. If we are preempted and switched to another cpu between the
3821 	 * two reads, it's OK as the two are still associated with the same cpu
3822 	 * and cmpxchg later will validate the cpu.
3823 	 */
3824 	c = raw_cpu_ptr(s->cpu_slab);
3825 	tid = READ_ONCE(c->tid);
3826 
3827 	/*
3828 	 * Irqless object alloc/free algorithm used here depends on sequence
3829 	 * of fetching cpu_slab's data. tid should be fetched before anything
3830 	 * on c to guarantee that object and slab associated with previous tid
3831 	 * won't be used with current tid. If we fetch tid first, object and
3832 	 * slab could be one associated with next tid and our alloc/free
3833 	 * request will be failed. In this case, we will retry. So, no problem.
3834 	 */
3835 	barrier();
3836 
3837 	/*
3838 	 * The transaction ids are globally unique per cpu and per operation on
3839 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
3840 	 * occurs on the right processor and that there was no operation on the
3841 	 * linked list in between.
3842 	 */
3843 
3844 	object = c->freelist;
3845 	slab = c->slab;
3846 
3847 	if (!USE_LOCKLESS_FAST_PATH() ||
3848 	    unlikely(!object || !slab || !node_match(slab, node))) {
3849 		object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
3850 	} else {
3851 		void *next_object = get_freepointer_safe(s, object);
3852 
3853 		/*
3854 		 * The cmpxchg will only match if there was no additional
3855 		 * operation and if we are on the right processor.
3856 		 *
3857 		 * The cmpxchg does the following atomically (without lock
3858 		 * semantics!)
3859 		 * 1. Relocate first pointer to the current per cpu area.
3860 		 * 2. Verify that tid and freelist have not been changed
3861 		 * 3. If they were not changed replace tid and freelist
3862 		 *
3863 		 * Since this is without lock semantics the protection is only
3864 		 * against code executing on this cpu *not* from access by
3865 		 * other cpus.
3866 		 */
3867 		if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) {
3868 			note_cmpxchg_failure("slab_alloc", s, tid);
3869 			goto redo;
3870 		}
3871 		prefetch_freepointer(s, next_object);
3872 		stat(s, ALLOC_FASTPATH);
3873 	}
3874 
3875 	return object;
3876 }
3877 #else /* CONFIG_SLUB_TINY */
3878 static void *__slab_alloc_node(struct kmem_cache *s,
3879 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3880 {
3881 	struct partial_context pc;
3882 	struct slab *slab;
3883 	void *object;
3884 
3885 	pc.flags = gfpflags;
3886 	pc.orig_size = orig_size;
3887 	slab = get_partial(s, node, &pc);
3888 
3889 	if (slab)
3890 		return pc.object;
3891 
3892 	slab = new_slab(s, gfpflags, node);
3893 	if (unlikely(!slab)) {
3894 		slab_out_of_memory(s, gfpflags, node);
3895 		return NULL;
3896 	}
3897 
3898 	object = alloc_single_from_new_slab(s, slab, orig_size);
3899 
3900 	return object;
3901 }
3902 #endif /* CONFIG_SLUB_TINY */
3903 
3904 /*
3905  * If the object has been wiped upon free, make sure it's fully initialized by
3906  * zeroing out freelist pointer.
3907  */
3908 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
3909 						   void *obj)
3910 {
3911 	if (unlikely(slab_want_init_on_free(s)) && obj &&
3912 	    !freeptr_outside_object(s))
3913 		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
3914 			0, sizeof(void *));
3915 }
3916 
3917 noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
3918 {
3919 	if (__should_failslab(s, gfpflags))
3920 		return -ENOMEM;
3921 	return 0;
3922 }
3923 ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
3924 
3925 static __fastpath_inline
3926 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
3927 {
3928 	flags &= gfp_allowed_mask;
3929 
3930 	might_alloc(flags);
3931 
3932 	if (unlikely(should_failslab(s, flags)))
3933 		return NULL;
3934 
3935 	return s;
3936 }
3937 
3938 static __fastpath_inline
3939 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
3940 			  gfp_t flags, size_t size, void **p, bool init,
3941 			  unsigned int orig_size)
3942 {
3943 	unsigned int zero_size = s->object_size;
3944 	bool kasan_init = init;
3945 	size_t i;
3946 	gfp_t init_flags = flags & gfp_allowed_mask;
3947 
3948 	/*
3949 	 * For kmalloc object, the allocated memory size(object_size) is likely
3950 	 * larger than the requested size(orig_size). If redzone check is
3951 	 * enabled for the extra space, don't zero it, as it will be redzoned
3952 	 * soon. The redzone operation for this extra space could be seen as a
3953 	 * replacement of current poisoning under certain debug option, and
3954 	 * won't break other sanity checks.
3955 	 */
3956 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
3957 	    (s->flags & SLAB_KMALLOC))
3958 		zero_size = orig_size;
3959 
3960 	/*
3961 	 * When slab_debug is enabled, avoid memory initialization integrated
3962 	 * into KASAN and instead zero out the memory via the memset below with
3963 	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
3964 	 * cause false-positive reports. This does not lead to a performance
3965 	 * penalty on production builds, as slab_debug is not intended to be
3966 	 * enabled there.
3967 	 */
3968 	if (__slub_debug_enabled())
3969 		kasan_init = false;
3970 
3971 	/*
3972 	 * As memory initialization might be integrated into KASAN,
3973 	 * kasan_slab_alloc and initialization memset must be
3974 	 * kept together to avoid discrepancies in behavior.
3975 	 *
3976 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
3977 	 */
3978 	for (i = 0; i < size; i++) {
3979 		p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
3980 		if (p[i] && init && (!kasan_init ||
3981 				     !kasan_has_integrated_init()))
3982 			memset(p[i], 0, zero_size);
3983 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
3984 					 s->flags, init_flags);
3985 		kmsan_slab_alloc(s, p[i], init_flags);
3986 		alloc_tagging_slab_alloc_hook(s, p[i], flags);
3987 	}
3988 
3989 	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
3990 }
3991 
3992 /*
3993  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
3994  * have the fastpath folded into their functions. So no function call
3995  * overhead for requests that can be satisfied on the fastpath.
3996  *
3997  * The fastpath works by first checking if the lockless freelist can be used.
3998  * If not then __slab_alloc is called for slow processing.
3999  *
4000  * Otherwise we can simply pick the next object from the lockless free list.
4001  */
4002 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
4003 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4004 {
4005 	void *object;
4006 	bool init = false;
4007 
4008 	s = slab_pre_alloc_hook(s, gfpflags);
4009 	if (unlikely(!s))
4010 		return NULL;
4011 
4012 	object = kfence_alloc(s, orig_size, gfpflags);
4013 	if (unlikely(object))
4014 		goto out;
4015 
4016 	object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
4017 
4018 	maybe_wipe_obj_freeptr(s, object);
4019 	init = slab_want_init_on_alloc(gfpflags, s);
4020 
4021 out:
4022 	/*
4023 	 * When init equals 'true', like for kzalloc() family, only
4024 	 * @orig_size bytes might be zeroed instead of s->object_size
4025 	 * In case this fails due to memcg_slab_post_alloc_hook(),
4026 	 * object is set to NULL
4027 	 */
4028 	slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size);
4029 
4030 	return object;
4031 }
4032 
4033 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
4034 {
4035 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
4036 				    s->object_size);
4037 
4038 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4039 
4040 	return ret;
4041 }
4042 EXPORT_SYMBOL(kmem_cache_alloc_noprof);
4043 
4044 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
4045 			   gfp_t gfpflags)
4046 {
4047 	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
4048 				    s->object_size);
4049 
4050 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4051 
4052 	return ret;
4053 }
4054 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
4055 
4056 /**
4057  * kmem_cache_alloc_node - Allocate an object on the specified node
4058  * @s: The cache to allocate from.
4059  * @gfpflags: See kmalloc().
4060  * @node: node number of the target node.
4061  *
4062  * Identical to kmem_cache_alloc but it will allocate memory on the given
4063  * node, which can improve the performance for cpu bound structures.
4064  *
4065  * Fallback to other node is possible if __GFP_THISNODE is not set.
4066  *
4067  * Return: pointer to the new object or %NULL in case of error
4068  */
4069 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
4070 {
4071 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
4072 
4073 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
4074 
4075 	return ret;
4076 }
4077 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
4078 
4079 /*
4080  * To avoid unnecessary overhead, we pass through large allocation requests
4081  * directly to the page allocator. We use __GFP_COMP, because we will need to
4082  * know the allocation order to free the pages properly in kfree.
4083  */
4084 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
4085 {
4086 	struct folio *folio;
4087 	void *ptr = NULL;
4088 	unsigned int order = get_order(size);
4089 
4090 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
4091 		flags = kmalloc_fix_flags(flags);
4092 
4093 	flags |= __GFP_COMP;
4094 	folio = (struct folio *)alloc_pages_node_noprof(node, flags, order);
4095 	if (folio) {
4096 		ptr = folio_address(folio);
4097 		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4098 				      PAGE_SIZE << order);
4099 	}
4100 
4101 	ptr = kasan_kmalloc_large(ptr, size, flags);
4102 	/* As ptr might get tagged, call kmemleak hook after KASAN. */
4103 	kmemleak_alloc(ptr, size, 1, flags);
4104 	kmsan_kmalloc_large(ptr, size, flags);
4105 
4106 	return ptr;
4107 }
4108 
4109 void *__kmalloc_large_noprof(size_t size, gfp_t flags)
4110 {
4111 	void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
4112 
4113 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4114 		      flags, NUMA_NO_NODE);
4115 	return ret;
4116 }
4117 EXPORT_SYMBOL(__kmalloc_large_noprof);
4118 
4119 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
4120 {
4121 	void *ret = ___kmalloc_large_node(size, flags, node);
4122 
4123 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4124 		      flags, node);
4125 	return ret;
4126 }
4127 EXPORT_SYMBOL(__kmalloc_large_node_noprof);
4128 
4129 static __always_inline
4130 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
4131 			unsigned long caller)
4132 {
4133 	struct kmem_cache *s;
4134 	void *ret;
4135 
4136 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4137 		ret = __kmalloc_large_node_noprof(size, flags, node);
4138 		trace_kmalloc(caller, ret, size,
4139 			      PAGE_SIZE << get_order(size), flags, node);
4140 		return ret;
4141 	}
4142 
4143 	if (unlikely(!size))
4144 		return ZERO_SIZE_PTR;
4145 
4146 	s = kmalloc_slab(size, b, flags, caller);
4147 
4148 	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
4149 	ret = kasan_kmalloc(s, ret, size, flags);
4150 	trace_kmalloc(caller, ret, size, s->size, flags, node);
4151 	return ret;
4152 }
4153 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
4154 {
4155 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
4156 }
4157 EXPORT_SYMBOL(__kmalloc_node_noprof);
4158 
4159 void *__kmalloc_noprof(size_t size, gfp_t flags)
4160 {
4161 	return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
4162 }
4163 EXPORT_SYMBOL(__kmalloc_noprof);
4164 
4165 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
4166 					 int node, unsigned long caller)
4167 {
4168 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
4169 
4170 }
4171 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
4172 
4173 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
4174 {
4175 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
4176 					    _RET_IP_, size);
4177 
4178 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
4179 
4180 	ret = kasan_kmalloc(s, ret, size, gfpflags);
4181 	return ret;
4182 }
4183 EXPORT_SYMBOL(__kmalloc_cache_noprof);
4184 
4185 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
4186 				  int node, size_t size)
4187 {
4188 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
4189 
4190 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
4191 
4192 	ret = kasan_kmalloc(s, ret, size, gfpflags);
4193 	return ret;
4194 }
4195 EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
4196 
4197 static noinline void free_to_partial_list(
4198 	struct kmem_cache *s, struct slab *slab,
4199 	void *head, void *tail, int bulk_cnt,
4200 	unsigned long addr)
4201 {
4202 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
4203 	struct slab *slab_free = NULL;
4204 	int cnt = bulk_cnt;
4205 	unsigned long flags;
4206 	depot_stack_handle_t handle = 0;
4207 
4208 	if (s->flags & SLAB_STORE_USER)
4209 		handle = set_track_prepare();
4210 
4211 	spin_lock_irqsave(&n->list_lock, flags);
4212 
4213 	if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
4214 		void *prior = slab->freelist;
4215 
4216 		/* Perform the actual freeing while we still hold the locks */
4217 		slab->inuse -= cnt;
4218 		set_freepointer(s, tail, prior);
4219 		slab->freelist = head;
4220 
4221 		/*
4222 		 * If the slab is empty, and node's partial list is full,
4223 		 * it should be discarded anyway no matter it's on full or
4224 		 * partial list.
4225 		 */
4226 		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
4227 			slab_free = slab;
4228 
4229 		if (!prior) {
4230 			/* was on full list */
4231 			remove_full(s, n, slab);
4232 			if (!slab_free) {
4233 				add_partial(n, slab, DEACTIVATE_TO_TAIL);
4234 				stat(s, FREE_ADD_PARTIAL);
4235 			}
4236 		} else if (slab_free) {
4237 			remove_partial(n, slab);
4238 			stat(s, FREE_REMOVE_PARTIAL);
4239 		}
4240 	}
4241 
4242 	if (slab_free) {
4243 		/*
4244 		 * Update the counters while still holding n->list_lock to
4245 		 * prevent spurious validation warnings
4246 		 */
4247 		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
4248 	}
4249 
4250 	spin_unlock_irqrestore(&n->list_lock, flags);
4251 
4252 	if (slab_free) {
4253 		stat(s, FREE_SLAB);
4254 		free_slab(s, slab_free);
4255 	}
4256 }
4257 
4258 /*
4259  * Slow path handling. This may still be called frequently since objects
4260  * have a longer lifetime than the cpu slabs in most processing loads.
4261  *
4262  * So we still attempt to reduce cache line usage. Just take the slab
4263  * lock and free the item. If there is no additional partial slab
4264  * handling required then we can return immediately.
4265  */
4266 static void __slab_free(struct kmem_cache *s, struct slab *slab,
4267 			void *head, void *tail, int cnt,
4268 			unsigned long addr)
4269 
4270 {
4271 	void *prior;
4272 	int was_frozen;
4273 	struct slab new;
4274 	unsigned long counters;
4275 	struct kmem_cache_node *n = NULL;
4276 	unsigned long flags;
4277 	bool on_node_partial;
4278 
4279 	stat(s, FREE_SLOWPATH);
4280 
4281 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4282 		free_to_partial_list(s, slab, head, tail, cnt, addr);
4283 		return;
4284 	}
4285 
4286 	do {
4287 		if (unlikely(n)) {
4288 			spin_unlock_irqrestore(&n->list_lock, flags);
4289 			n = NULL;
4290 		}
4291 		prior = slab->freelist;
4292 		counters = slab->counters;
4293 		set_freepointer(s, tail, prior);
4294 		new.counters = counters;
4295 		was_frozen = new.frozen;
4296 		new.inuse -= cnt;
4297 		if ((!new.inuse || !prior) && !was_frozen) {
4298 			/* Needs to be taken off a list */
4299 			if (!kmem_cache_has_cpu_partial(s) || prior) {
4300 
4301 				n = get_node(s, slab_nid(slab));
4302 				/*
4303 				 * Speculatively acquire the list_lock.
4304 				 * If the cmpxchg does not succeed then we may
4305 				 * drop the list_lock without any processing.
4306 				 *
4307 				 * Otherwise the list_lock will synchronize with
4308 				 * other processors updating the list of slabs.
4309 				 */
4310 				spin_lock_irqsave(&n->list_lock, flags);
4311 
4312 				on_node_partial = slab_test_node_partial(slab);
4313 			}
4314 		}
4315 
4316 	} while (!slab_update_freelist(s, slab,
4317 		prior, counters,
4318 		head, new.counters,
4319 		"__slab_free"));
4320 
4321 	if (likely(!n)) {
4322 
4323 		if (likely(was_frozen)) {
4324 			/*
4325 			 * The list lock was not taken therefore no list
4326 			 * activity can be necessary.
4327 			 */
4328 			stat(s, FREE_FROZEN);
4329 		} else if (kmem_cache_has_cpu_partial(s) && !prior) {
4330 			/*
4331 			 * If we started with a full slab then put it onto the
4332 			 * per cpu partial list.
4333 			 */
4334 			put_cpu_partial(s, slab, 1);
4335 			stat(s, CPU_PARTIAL_FREE);
4336 		}
4337 
4338 		return;
4339 	}
4340 
4341 	/*
4342 	 * This slab was partially empty but not on the per-node partial list,
4343 	 * in which case we shouldn't manipulate its list, just return.
4344 	 */
4345 	if (prior && !on_node_partial) {
4346 		spin_unlock_irqrestore(&n->list_lock, flags);
4347 		return;
4348 	}
4349 
4350 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
4351 		goto slab_empty;
4352 
4353 	/*
4354 	 * Objects left in the slab. If it was not on the partial list before
4355 	 * then add it.
4356 	 */
4357 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
4358 		add_partial(n, slab, DEACTIVATE_TO_TAIL);
4359 		stat(s, FREE_ADD_PARTIAL);
4360 	}
4361 	spin_unlock_irqrestore(&n->list_lock, flags);
4362 	return;
4363 
4364 slab_empty:
4365 	if (prior) {
4366 		/*
4367 		 * Slab on the partial list.
4368 		 */
4369 		remove_partial(n, slab);
4370 		stat(s, FREE_REMOVE_PARTIAL);
4371 	}
4372 
4373 	spin_unlock_irqrestore(&n->list_lock, flags);
4374 	stat(s, FREE_SLAB);
4375 	discard_slab(s, slab);
4376 }
4377 
4378 #ifndef CONFIG_SLUB_TINY
4379 /*
4380  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
4381  * can perform fastpath freeing without additional function calls.
4382  *
4383  * The fastpath is only possible if we are freeing to the current cpu slab
4384  * of this processor. This typically the case if we have just allocated
4385  * the item before.
4386  *
4387  * If fastpath is not possible then fall back to __slab_free where we deal
4388  * with all sorts of special processing.
4389  *
4390  * Bulk free of a freelist with several objects (all pointing to the
4391  * same slab) possible by specifying head and tail ptr, plus objects
4392  * count (cnt). Bulk free indicated by tail pointer being set.
4393  */
4394 static __always_inline void do_slab_free(struct kmem_cache *s,
4395 				struct slab *slab, void *head, void *tail,
4396 				int cnt, unsigned long addr)
4397 {
4398 	struct kmem_cache_cpu *c;
4399 	unsigned long tid;
4400 	void **freelist;
4401 
4402 redo:
4403 	/*
4404 	 * Determine the currently cpus per cpu slab.
4405 	 * The cpu may change afterward. However that does not matter since
4406 	 * data is retrieved via this pointer. If we are on the same cpu
4407 	 * during the cmpxchg then the free will succeed.
4408 	 */
4409 	c = raw_cpu_ptr(s->cpu_slab);
4410 	tid = READ_ONCE(c->tid);
4411 
4412 	/* Same with comment on barrier() in __slab_alloc_node() */
4413 	barrier();
4414 
4415 	if (unlikely(slab != c->slab)) {
4416 		__slab_free(s, slab, head, tail, cnt, addr);
4417 		return;
4418 	}
4419 
4420 	if (USE_LOCKLESS_FAST_PATH()) {
4421 		freelist = READ_ONCE(c->freelist);
4422 
4423 		set_freepointer(s, tail, freelist);
4424 
4425 		if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
4426 			note_cmpxchg_failure("slab_free", s, tid);
4427 			goto redo;
4428 		}
4429 	} else {
4430 		/* Update the free list under the local lock */
4431 		local_lock(&s->cpu_slab->lock);
4432 		c = this_cpu_ptr(s->cpu_slab);
4433 		if (unlikely(slab != c->slab)) {
4434 			local_unlock(&s->cpu_slab->lock);
4435 			goto redo;
4436 		}
4437 		tid = c->tid;
4438 		freelist = c->freelist;
4439 
4440 		set_freepointer(s, tail, freelist);
4441 		c->freelist = head;
4442 		c->tid = next_tid(tid);
4443 
4444 		local_unlock(&s->cpu_slab->lock);
4445 	}
4446 	stat_add(s, FREE_FASTPATH, cnt);
4447 }
4448 #else /* CONFIG_SLUB_TINY */
4449 static void do_slab_free(struct kmem_cache *s,
4450 				struct slab *slab, void *head, void *tail,
4451 				int cnt, unsigned long addr)
4452 {
4453 	__slab_free(s, slab, head, tail, cnt, addr);
4454 }
4455 #endif /* CONFIG_SLUB_TINY */
4456 
4457 static __fastpath_inline
4458 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
4459 	       unsigned long addr)
4460 {
4461 	memcg_slab_free_hook(s, slab, &object, 1);
4462 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
4463 
4464 	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
4465 		do_slab_free(s, slab, object, object, 1, addr);
4466 }
4467 
4468 #ifdef CONFIG_MEMCG_KMEM
4469 /* Do not inline the rare memcg charging failed path into the allocation path */
4470 static noinline
4471 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
4472 {
4473 	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
4474 		do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
4475 }
4476 #endif
4477 
4478 static __fastpath_inline
4479 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
4480 		    void *tail, void **p, int cnt, unsigned long addr)
4481 {
4482 	memcg_slab_free_hook(s, slab, p, cnt);
4483 	alloc_tagging_slab_free_hook(s, slab, p, cnt);
4484 	/*
4485 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
4486 	 * to remove objects, whose reuse must be delayed.
4487 	 */
4488 	if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt)))
4489 		do_slab_free(s, slab, head, tail, cnt, addr);
4490 }
4491 
4492 #ifdef CONFIG_KASAN_GENERIC
4493 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
4494 {
4495 	do_slab_free(cache, virt_to_slab(x), x, x, 1, addr);
4496 }
4497 #endif
4498 
4499 static inline struct kmem_cache *virt_to_cache(const void *obj)
4500 {
4501 	struct slab *slab;
4502 
4503 	slab = virt_to_slab(obj);
4504 	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
4505 		return NULL;
4506 	return slab->slab_cache;
4507 }
4508 
4509 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
4510 {
4511 	struct kmem_cache *cachep;
4512 
4513 	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
4514 	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
4515 		return s;
4516 
4517 	cachep = virt_to_cache(x);
4518 	if (WARN(cachep && cachep != s,
4519 		 "%s: Wrong slab cache. %s but object is from %s\n",
4520 		 __func__, s->name, cachep->name))
4521 		print_tracking(cachep, x);
4522 	return cachep;
4523 }
4524 
4525 /**
4526  * kmem_cache_free - Deallocate an object
4527  * @s: The cache the allocation was from.
4528  * @x: The previously allocated object.
4529  *
4530  * Free an object which was previously allocated from this
4531  * cache.
4532  */
4533 void kmem_cache_free(struct kmem_cache *s, void *x)
4534 {
4535 	s = cache_from_obj(s, x);
4536 	if (!s)
4537 		return;
4538 	trace_kmem_cache_free(_RET_IP_, x, s);
4539 	slab_free(s, virt_to_slab(x), x, _RET_IP_);
4540 }
4541 EXPORT_SYMBOL(kmem_cache_free);
4542 
4543 static void free_large_kmalloc(struct folio *folio, void *object)
4544 {
4545 	unsigned int order = folio_order(folio);
4546 
4547 	if (WARN_ON_ONCE(order == 0))
4548 		pr_warn_once("object pointer: 0x%p\n", object);
4549 
4550 	kmemleak_free(object);
4551 	kasan_kfree_large(object);
4552 	kmsan_kfree_large(object);
4553 
4554 	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4555 			      -(PAGE_SIZE << order));
4556 	folio_put(folio);
4557 }
4558 
4559 /**
4560  * kfree - free previously allocated memory
4561  * @object: pointer returned by kmalloc() or kmem_cache_alloc()
4562  *
4563  * If @object is NULL, no operation is performed.
4564  */
4565 void kfree(const void *object)
4566 {
4567 	struct folio *folio;
4568 	struct slab *slab;
4569 	struct kmem_cache *s;
4570 	void *x = (void *)object;
4571 
4572 	trace_kfree(_RET_IP_, object);
4573 
4574 	if (unlikely(ZERO_OR_NULL_PTR(object)))
4575 		return;
4576 
4577 	folio = virt_to_folio(object);
4578 	if (unlikely(!folio_test_slab(folio))) {
4579 		free_large_kmalloc(folio, (void *)object);
4580 		return;
4581 	}
4582 
4583 	slab = folio_slab(folio);
4584 	s = slab->slab_cache;
4585 	slab_free(s, slab, x, _RET_IP_);
4586 }
4587 EXPORT_SYMBOL(kfree);
4588 
4589 struct detached_freelist {
4590 	struct slab *slab;
4591 	void *tail;
4592 	void *freelist;
4593 	int cnt;
4594 	struct kmem_cache *s;
4595 };
4596 
4597 /*
4598  * This function progressively scans the array with free objects (with
4599  * a limited look ahead) and extract objects belonging to the same
4600  * slab.  It builds a detached freelist directly within the given
4601  * slab/objects.  This can happen without any need for
4602  * synchronization, because the objects are owned by running process.
4603  * The freelist is build up as a single linked list in the objects.
4604  * The idea is, that this detached freelist can then be bulk
4605  * transferred to the real freelist(s), but only requiring a single
4606  * synchronization primitive.  Look ahead in the array is limited due
4607  * to performance reasons.
4608  */
4609 static inline
4610 int build_detached_freelist(struct kmem_cache *s, size_t size,
4611 			    void **p, struct detached_freelist *df)
4612 {
4613 	int lookahead = 3;
4614 	void *object;
4615 	struct folio *folio;
4616 	size_t same;
4617 
4618 	object = p[--size];
4619 	folio = virt_to_folio(object);
4620 	if (!s) {
4621 		/* Handle kalloc'ed objects */
4622 		if (unlikely(!folio_test_slab(folio))) {
4623 			free_large_kmalloc(folio, object);
4624 			df->slab = NULL;
4625 			return size;
4626 		}
4627 		/* Derive kmem_cache from object */
4628 		df->slab = folio_slab(folio);
4629 		df->s = df->slab->slab_cache;
4630 	} else {
4631 		df->slab = folio_slab(folio);
4632 		df->s = cache_from_obj(s, object); /* Support for memcg */
4633 	}
4634 
4635 	/* Start new detached freelist */
4636 	df->tail = object;
4637 	df->freelist = object;
4638 	df->cnt = 1;
4639 
4640 	if (is_kfence_address(object))
4641 		return size;
4642 
4643 	set_freepointer(df->s, object, NULL);
4644 
4645 	same = size;
4646 	while (size) {
4647 		object = p[--size];
4648 		/* df->slab is always set at this point */
4649 		if (df->slab == virt_to_slab(object)) {
4650 			/* Opportunity build freelist */
4651 			set_freepointer(df->s, object, df->freelist);
4652 			df->freelist = object;
4653 			df->cnt++;
4654 			same--;
4655 			if (size != same)
4656 				swap(p[size], p[same]);
4657 			continue;
4658 		}
4659 
4660 		/* Limit look ahead search */
4661 		if (!--lookahead)
4662 			break;
4663 	}
4664 
4665 	return same;
4666 }
4667 
4668 /*
4669  * Internal bulk free of objects that were not initialised by the post alloc
4670  * hooks and thus should not be processed by the free hooks
4671  */
4672 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
4673 {
4674 	if (!size)
4675 		return;
4676 
4677 	do {
4678 		struct detached_freelist df;
4679 
4680 		size = build_detached_freelist(s, size, p, &df);
4681 		if (!df.slab)
4682 			continue;
4683 
4684 		do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
4685 			     _RET_IP_);
4686 	} while (likely(size));
4687 }
4688 
4689 /* Note that interrupts must be enabled when calling this function. */
4690 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
4691 {
4692 	if (!size)
4693 		return;
4694 
4695 	do {
4696 		struct detached_freelist df;
4697 
4698 		size = build_detached_freelist(s, size, p, &df);
4699 		if (!df.slab)
4700 			continue;
4701 
4702 		slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
4703 			       df.cnt, _RET_IP_);
4704 	} while (likely(size));
4705 }
4706 EXPORT_SYMBOL(kmem_cache_free_bulk);
4707 
4708 #ifndef CONFIG_SLUB_TINY
4709 static inline
4710 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
4711 			    void **p)
4712 {
4713 	struct kmem_cache_cpu *c;
4714 	unsigned long irqflags;
4715 	int i;
4716 
4717 	/*
4718 	 * Drain objects in the per cpu slab, while disabling local
4719 	 * IRQs, which protects against PREEMPT and interrupts
4720 	 * handlers invoking normal fastpath.
4721 	 */
4722 	c = slub_get_cpu_ptr(s->cpu_slab);
4723 	local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4724 
4725 	for (i = 0; i < size; i++) {
4726 		void *object = kfence_alloc(s, s->object_size, flags);
4727 
4728 		if (unlikely(object)) {
4729 			p[i] = object;
4730 			continue;
4731 		}
4732 
4733 		object = c->freelist;
4734 		if (unlikely(!object)) {
4735 			/*
4736 			 * We may have removed an object from c->freelist using
4737 			 * the fastpath in the previous iteration; in that case,
4738 			 * c->tid has not been bumped yet.
4739 			 * Since ___slab_alloc() may reenable interrupts while
4740 			 * allocating memory, we should bump c->tid now.
4741 			 */
4742 			c->tid = next_tid(c->tid);
4743 
4744 			local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4745 
4746 			/*
4747 			 * Invoking slow path likely have side-effect
4748 			 * of re-populating per CPU c->freelist
4749 			 */
4750 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
4751 					    _RET_IP_, c, s->object_size);
4752 			if (unlikely(!p[i]))
4753 				goto error;
4754 
4755 			c = this_cpu_ptr(s->cpu_slab);
4756 			maybe_wipe_obj_freeptr(s, p[i]);
4757 
4758 			local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4759 
4760 			continue; /* goto for-loop */
4761 		}
4762 		c->freelist = get_freepointer(s, object);
4763 		p[i] = object;
4764 		maybe_wipe_obj_freeptr(s, p[i]);
4765 		stat(s, ALLOC_FASTPATH);
4766 	}
4767 	c->tid = next_tid(c->tid);
4768 	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4769 	slub_put_cpu_ptr(s->cpu_slab);
4770 
4771 	return i;
4772 
4773 error:
4774 	slub_put_cpu_ptr(s->cpu_slab);
4775 	__kmem_cache_free_bulk(s, i, p);
4776 	return 0;
4777 
4778 }
4779 #else /* CONFIG_SLUB_TINY */
4780 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
4781 				   size_t size, void **p)
4782 {
4783 	int i;
4784 
4785 	for (i = 0; i < size; i++) {
4786 		void *object = kfence_alloc(s, s->object_size, flags);
4787 
4788 		if (unlikely(object)) {
4789 			p[i] = object;
4790 			continue;
4791 		}
4792 
4793 		p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
4794 					 _RET_IP_, s->object_size);
4795 		if (unlikely(!p[i]))
4796 			goto error;
4797 
4798 		maybe_wipe_obj_freeptr(s, p[i]);
4799 	}
4800 
4801 	return i;
4802 
4803 error:
4804 	__kmem_cache_free_bulk(s, i, p);
4805 	return 0;
4806 }
4807 #endif /* CONFIG_SLUB_TINY */
4808 
4809 /* Note that interrupts must be enabled when calling this function. */
4810 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
4811 				 void **p)
4812 {
4813 	int i;
4814 
4815 	if (!size)
4816 		return 0;
4817 
4818 	s = slab_pre_alloc_hook(s, flags);
4819 	if (unlikely(!s))
4820 		return 0;
4821 
4822 	i = __kmem_cache_alloc_bulk(s, flags, size, p);
4823 	if (unlikely(i == 0))
4824 		return 0;
4825 
4826 	/*
4827 	 * memcg and kmem_cache debug support and memory initialization.
4828 	 * Done outside of the IRQ disabled fastpath loop.
4829 	 */
4830 	if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
4831 		    slab_want_init_on_alloc(flags, s), s->object_size))) {
4832 		return 0;
4833 	}
4834 	return i;
4835 }
4836 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
4837 
4838 
4839 /*
4840  * Object placement in a slab is made very easy because we always start at
4841  * offset 0. If we tune the size of the object to the alignment then we can
4842  * get the required alignment by putting one properly sized object after
4843  * another.
4844  *
4845  * Notice that the allocation order determines the sizes of the per cpu
4846  * caches. Each processor has always one slab available for allocations.
4847  * Increasing the allocation order reduces the number of times that slabs
4848  * must be moved on and off the partial lists and is therefore a factor in
4849  * locking overhead.
4850  */
4851 
4852 /*
4853  * Minimum / Maximum order of slab pages. This influences locking overhead
4854  * and slab fragmentation. A higher order reduces the number of partial slabs
4855  * and increases the number of allocations possible without having to
4856  * take the list_lock.
4857  */
4858 static unsigned int slub_min_order;
4859 static unsigned int slub_max_order =
4860 	IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
4861 static unsigned int slub_min_objects;
4862 
4863 /*
4864  * Calculate the order of allocation given an slab object size.
4865  *
4866  * The order of allocation has significant impact on performance and other
4867  * system components. Generally order 0 allocations should be preferred since
4868  * order 0 does not cause fragmentation in the page allocator. Larger objects
4869  * be problematic to put into order 0 slabs because there may be too much
4870  * unused space left. We go to a higher order if more than 1/16th of the slab
4871  * would be wasted.
4872  *
4873  * In order to reach satisfactory performance we must ensure that a minimum
4874  * number of objects is in one slab. Otherwise we may generate too much
4875  * activity on the partial lists which requires taking the list_lock. This is
4876  * less a concern for large slabs though which are rarely used.
4877  *
4878  * slab_max_order specifies the order where we begin to stop considering the
4879  * number of objects in a slab as critical. If we reach slab_max_order then
4880  * we try to keep the page order as low as possible. So we accept more waste
4881  * of space in favor of a small page order.
4882  *
4883  * Higher order allocations also allow the placement of more objects in a
4884  * slab and thereby reduce object handling overhead. If the user has
4885  * requested a higher minimum order then we start with that one instead of
4886  * the smallest order which will fit the object.
4887  */
4888 static inline unsigned int calc_slab_order(unsigned int size,
4889 		unsigned int min_order, unsigned int max_order,
4890 		unsigned int fract_leftover)
4891 {
4892 	unsigned int order;
4893 
4894 	for (order = min_order; order <= max_order; order++) {
4895 
4896 		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
4897 		unsigned int rem;
4898 
4899 		rem = slab_size % size;
4900 
4901 		if (rem <= slab_size / fract_leftover)
4902 			break;
4903 	}
4904 
4905 	return order;
4906 }
4907 
4908 static inline int calculate_order(unsigned int size)
4909 {
4910 	unsigned int order;
4911 	unsigned int min_objects;
4912 	unsigned int max_objects;
4913 	unsigned int min_order;
4914 
4915 	min_objects = slub_min_objects;
4916 	if (!min_objects) {
4917 		/*
4918 		 * Some architectures will only update present cpus when
4919 		 * onlining them, so don't trust the number if it's just 1. But
4920 		 * we also don't want to use nr_cpu_ids always, as on some other
4921 		 * architectures, there can be many possible cpus, but never
4922 		 * onlined. Here we compromise between trying to avoid too high
4923 		 * order on systems that appear larger than they are, and too
4924 		 * low order on systems that appear smaller than they are.
4925 		 */
4926 		unsigned int nr_cpus = num_present_cpus();
4927 		if (nr_cpus <= 1)
4928 			nr_cpus = nr_cpu_ids;
4929 		min_objects = 4 * (fls(nr_cpus) + 1);
4930 	}
4931 	/* min_objects can't be 0 because get_order(0) is undefined */
4932 	max_objects = max(order_objects(slub_max_order, size), 1U);
4933 	min_objects = min(min_objects, max_objects);
4934 
4935 	min_order = max_t(unsigned int, slub_min_order,
4936 			  get_order(min_objects * size));
4937 	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
4938 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
4939 
4940 	/*
4941 	 * Attempt to find best configuration for a slab. This works by first
4942 	 * attempting to generate a layout with the best possible configuration
4943 	 * and backing off gradually.
4944 	 *
4945 	 * We start with accepting at most 1/16 waste and try to find the
4946 	 * smallest order from min_objects-derived/slab_min_order up to
4947 	 * slab_max_order that will satisfy the constraint. Note that increasing
4948 	 * the order can only result in same or less fractional waste, not more.
4949 	 *
4950 	 * If that fails, we increase the acceptable fraction of waste and try
4951 	 * again. The last iteration with fraction of 1/2 would effectively
4952 	 * accept any waste and give us the order determined by min_objects, as
4953 	 * long as at least single object fits within slab_max_order.
4954 	 */
4955 	for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
4956 		order = calc_slab_order(size, min_order, slub_max_order,
4957 					fraction);
4958 		if (order <= slub_max_order)
4959 			return order;
4960 	}
4961 
4962 	/*
4963 	 * Doh this slab cannot be placed using slab_max_order.
4964 	 */
4965 	order = get_order(size);
4966 	if (order <= MAX_PAGE_ORDER)
4967 		return order;
4968 	return -ENOSYS;
4969 }
4970 
4971 static void
4972 init_kmem_cache_node(struct kmem_cache_node *n)
4973 {
4974 	n->nr_partial = 0;
4975 	spin_lock_init(&n->list_lock);
4976 	INIT_LIST_HEAD(&n->partial);
4977 #ifdef CONFIG_SLUB_DEBUG
4978 	atomic_long_set(&n->nr_slabs, 0);
4979 	atomic_long_set(&n->total_objects, 0);
4980 	INIT_LIST_HEAD(&n->full);
4981 #endif
4982 }
4983 
4984 #ifndef CONFIG_SLUB_TINY
4985 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4986 {
4987 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
4988 			NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
4989 			sizeof(struct kmem_cache_cpu));
4990 
4991 	/*
4992 	 * Must align to double word boundary for the double cmpxchg
4993 	 * instructions to work; see __pcpu_double_call_return_bool().
4994 	 */
4995 	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
4996 				     2 * sizeof(void *));
4997 
4998 	if (!s->cpu_slab)
4999 		return 0;
5000 
5001 	init_kmem_cache_cpus(s);
5002 
5003 	return 1;
5004 }
5005 #else
5006 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
5007 {
5008 	return 1;
5009 }
5010 #endif /* CONFIG_SLUB_TINY */
5011 
5012 static struct kmem_cache *kmem_cache_node;
5013 
5014 /*
5015  * No kmalloc_node yet so do it by hand. We know that this is the first
5016  * slab on the node for this slabcache. There are no concurrent accesses
5017  * possible.
5018  *
5019  * Note that this function only works on the kmem_cache_node
5020  * when allocating for the kmem_cache_node. This is used for bootstrapping
5021  * memory on a fresh node that has no slab structures yet.
5022  */
5023 static void early_kmem_cache_node_alloc(int node)
5024 {
5025 	struct slab *slab;
5026 	struct kmem_cache_node *n;
5027 
5028 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
5029 
5030 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
5031 
5032 	BUG_ON(!slab);
5033 	if (slab_nid(slab) != node) {
5034 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
5035 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
5036 	}
5037 
5038 	n = slab->freelist;
5039 	BUG_ON(!n);
5040 #ifdef CONFIG_SLUB_DEBUG
5041 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
5042 #endif
5043 	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
5044 	slab->freelist = get_freepointer(kmem_cache_node, n);
5045 	slab->inuse = 1;
5046 	kmem_cache_node->node[node] = n;
5047 	init_kmem_cache_node(n);
5048 	inc_slabs_node(kmem_cache_node, node, slab->objects);
5049 
5050 	/*
5051 	 * No locks need to be taken here as it has just been
5052 	 * initialized and there is no concurrent access.
5053 	 */
5054 	__add_partial(n, slab, DEACTIVATE_TO_HEAD);
5055 }
5056 
5057 static void free_kmem_cache_nodes(struct kmem_cache *s)
5058 {
5059 	int node;
5060 	struct kmem_cache_node *n;
5061 
5062 	for_each_kmem_cache_node(s, node, n) {
5063 		s->node[node] = NULL;
5064 		kmem_cache_free(kmem_cache_node, n);
5065 	}
5066 }
5067 
5068 void __kmem_cache_release(struct kmem_cache *s)
5069 {
5070 	cache_random_seq_destroy(s);
5071 #ifndef CONFIG_SLUB_TINY
5072 	free_percpu(s->cpu_slab);
5073 #endif
5074 	free_kmem_cache_nodes(s);
5075 }
5076 
5077 static int init_kmem_cache_nodes(struct kmem_cache *s)
5078 {
5079 	int node;
5080 
5081 	for_each_node_mask(node, slab_nodes) {
5082 		struct kmem_cache_node *n;
5083 
5084 		if (slab_state == DOWN) {
5085 			early_kmem_cache_node_alloc(node);
5086 			continue;
5087 		}
5088 		n = kmem_cache_alloc_node(kmem_cache_node,
5089 						GFP_KERNEL, node);
5090 
5091 		if (!n) {
5092 			free_kmem_cache_nodes(s);
5093 			return 0;
5094 		}
5095 
5096 		init_kmem_cache_node(n);
5097 		s->node[node] = n;
5098 	}
5099 	return 1;
5100 }
5101 
5102 static void set_cpu_partial(struct kmem_cache *s)
5103 {
5104 #ifdef CONFIG_SLUB_CPU_PARTIAL
5105 	unsigned int nr_objects;
5106 
5107 	/*
5108 	 * cpu_partial determined the maximum number of objects kept in the
5109 	 * per cpu partial lists of a processor.
5110 	 *
5111 	 * Per cpu partial lists mainly contain slabs that just have one
5112 	 * object freed. If they are used for allocation then they can be
5113 	 * filled up again with minimal effort. The slab will never hit the
5114 	 * per node partial lists and therefore no locking will be required.
5115 	 *
5116 	 * For backwards compatibility reasons, this is determined as number
5117 	 * of objects, even though we now limit maximum number of pages, see
5118 	 * slub_set_cpu_partial()
5119 	 */
5120 	if (!kmem_cache_has_cpu_partial(s))
5121 		nr_objects = 0;
5122 	else if (s->size >= PAGE_SIZE)
5123 		nr_objects = 6;
5124 	else if (s->size >= 1024)
5125 		nr_objects = 24;
5126 	else if (s->size >= 256)
5127 		nr_objects = 52;
5128 	else
5129 		nr_objects = 120;
5130 
5131 	slub_set_cpu_partial(s, nr_objects);
5132 #endif
5133 }
5134 
5135 /*
5136  * calculate_sizes() determines the order and the distribution of data within
5137  * a slab object.
5138  */
5139 static int calculate_sizes(struct kmem_cache *s)
5140 {
5141 	slab_flags_t flags = s->flags;
5142 	unsigned int size = s->object_size;
5143 	unsigned int order;
5144 
5145 	/*
5146 	 * Round up object size to the next word boundary. We can only
5147 	 * place the free pointer at word boundaries and this determines
5148 	 * the possible location of the free pointer.
5149 	 */
5150 	size = ALIGN(size, sizeof(void *));
5151 
5152 #ifdef CONFIG_SLUB_DEBUG
5153 	/*
5154 	 * Determine if we can poison the object itself. If the user of
5155 	 * the slab may touch the object after free or before allocation
5156 	 * then we should never poison the object itself.
5157 	 */
5158 	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
5159 			!s->ctor)
5160 		s->flags |= __OBJECT_POISON;
5161 	else
5162 		s->flags &= ~__OBJECT_POISON;
5163 
5164 
5165 	/*
5166 	 * If we are Redzoning then check if there is some space between the
5167 	 * end of the object and the free pointer. If not then add an
5168 	 * additional word to have some bytes to store Redzone information.
5169 	 */
5170 	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
5171 		size += sizeof(void *);
5172 #endif
5173 
5174 	/*
5175 	 * With that we have determined the number of bytes in actual use
5176 	 * by the object and redzoning.
5177 	 */
5178 	s->inuse = size;
5179 
5180 	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
5181 	    ((flags & SLAB_RED_ZONE) &&
5182 	     (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
5183 		/*
5184 		 * Relocate free pointer after the object if it is not
5185 		 * permitted to overwrite the first word of the object on
5186 		 * kmem_cache_free.
5187 		 *
5188 		 * This is the case if we do RCU, have a constructor or
5189 		 * destructor, are poisoning the objects, or are
5190 		 * redzoning an object smaller than sizeof(void *) or are
5191 		 * redzoning an object with slub_debug_orig_size() enabled,
5192 		 * in which case the right redzone may be extended.
5193 		 *
5194 		 * The assumption that s->offset >= s->inuse means free
5195 		 * pointer is outside of the object is used in the
5196 		 * freeptr_outside_object() function. If that is no
5197 		 * longer true, the function needs to be modified.
5198 		 */
5199 		s->offset = size;
5200 		size += sizeof(void *);
5201 	} else {
5202 		/*
5203 		 * Store freelist pointer near middle of object to keep
5204 		 * it away from the edges of the object to avoid small
5205 		 * sized over/underflows from neighboring allocations.
5206 		 */
5207 		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
5208 	}
5209 
5210 #ifdef CONFIG_SLUB_DEBUG
5211 	if (flags & SLAB_STORE_USER) {
5212 		/*
5213 		 * Need to store information about allocs and frees after
5214 		 * the object.
5215 		 */
5216 		size += 2 * sizeof(struct track);
5217 
5218 		/* Save the original kmalloc request size */
5219 		if (flags & SLAB_KMALLOC)
5220 			size += sizeof(unsigned int);
5221 	}
5222 #endif
5223 
5224 	kasan_cache_create(s, &size, &s->flags);
5225 #ifdef CONFIG_SLUB_DEBUG
5226 	if (flags & SLAB_RED_ZONE) {
5227 		/*
5228 		 * Add some empty padding so that we can catch
5229 		 * overwrites from earlier objects rather than let
5230 		 * tracking information or the free pointer be
5231 		 * corrupted if a user writes before the start
5232 		 * of the object.
5233 		 */
5234 		size += sizeof(void *);
5235 
5236 		s->red_left_pad = sizeof(void *);
5237 		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
5238 		size += s->red_left_pad;
5239 	}
5240 #endif
5241 
5242 	/*
5243 	 * SLUB stores one object immediately after another beginning from
5244 	 * offset 0. In order to align the objects we have to simply size
5245 	 * each object to conform to the alignment.
5246 	 */
5247 	size = ALIGN(size, s->align);
5248 	s->size = size;
5249 	s->reciprocal_size = reciprocal_value(size);
5250 	order = calculate_order(size);
5251 
5252 	if ((int)order < 0)
5253 		return 0;
5254 
5255 	s->allocflags = __GFP_COMP;
5256 
5257 	if (s->flags & SLAB_CACHE_DMA)
5258 		s->allocflags |= GFP_DMA;
5259 
5260 	if (s->flags & SLAB_CACHE_DMA32)
5261 		s->allocflags |= GFP_DMA32;
5262 
5263 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5264 		s->allocflags |= __GFP_RECLAIMABLE;
5265 
5266 	/*
5267 	 * Determine the number of objects per slab
5268 	 */
5269 	s->oo = oo_make(order, size);
5270 	s->min = oo_make(get_order(size), size);
5271 
5272 	return !!oo_objects(s->oo);
5273 }
5274 
5275 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
5276 {
5277 	s->flags = kmem_cache_flags(flags, s->name);
5278 #ifdef CONFIG_SLAB_FREELIST_HARDENED
5279 	s->random = get_random_long();
5280 #endif
5281 
5282 	if (!calculate_sizes(s))
5283 		goto error;
5284 	if (disable_higher_order_debug) {
5285 		/*
5286 		 * Disable debugging flags that store metadata if the min slab
5287 		 * order increased.
5288 		 */
5289 		if (get_order(s->size) > get_order(s->object_size)) {
5290 			s->flags &= ~DEBUG_METADATA_FLAGS;
5291 			s->offset = 0;
5292 			if (!calculate_sizes(s))
5293 				goto error;
5294 		}
5295 	}
5296 
5297 #ifdef system_has_freelist_aba
5298 	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
5299 		/* Enable fast mode */
5300 		s->flags |= __CMPXCHG_DOUBLE;
5301 	}
5302 #endif
5303 
5304 	/*
5305 	 * The larger the object size is, the more slabs we want on the partial
5306 	 * list to avoid pounding the page allocator excessively.
5307 	 */
5308 	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
5309 	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
5310 
5311 	set_cpu_partial(s);
5312 
5313 #ifdef CONFIG_NUMA
5314 	s->remote_node_defrag_ratio = 1000;
5315 #endif
5316 
5317 	/* Initialize the pre-computed randomized freelist if slab is up */
5318 	if (slab_state >= UP) {
5319 		if (init_cache_random_seq(s))
5320 			goto error;
5321 	}
5322 
5323 	if (!init_kmem_cache_nodes(s))
5324 		goto error;
5325 
5326 	if (alloc_kmem_cache_cpus(s))
5327 		return 0;
5328 
5329 error:
5330 	__kmem_cache_release(s);
5331 	return -EINVAL;
5332 }
5333 
5334 static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
5335 			      const char *text)
5336 {
5337 #ifdef CONFIG_SLUB_DEBUG
5338 	void *addr = slab_address(slab);
5339 	void *p;
5340 
5341 	slab_err(s, slab, text, s->name);
5342 
5343 	spin_lock(&object_map_lock);
5344 	__fill_map(object_map, s, slab);
5345 
5346 	for_each_object(p, s, addr, slab->objects) {
5347 
5348 		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
5349 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
5350 			print_tracking(s, p);
5351 		}
5352 	}
5353 	spin_unlock(&object_map_lock);
5354 #endif
5355 }
5356 
5357 /*
5358  * Attempt to free all partial slabs on a node.
5359  * This is called from __kmem_cache_shutdown(). We must take list_lock
5360  * because sysfs file might still access partial list after the shutdowning.
5361  */
5362 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
5363 {
5364 	LIST_HEAD(discard);
5365 	struct slab *slab, *h;
5366 
5367 	BUG_ON(irqs_disabled());
5368 	spin_lock_irq(&n->list_lock);
5369 	list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
5370 		if (!slab->inuse) {
5371 			remove_partial(n, slab);
5372 			list_add(&slab->slab_list, &discard);
5373 		} else {
5374 			list_slab_objects(s, slab,
5375 			  "Objects remaining in %s on __kmem_cache_shutdown()");
5376 		}
5377 	}
5378 	spin_unlock_irq(&n->list_lock);
5379 
5380 	list_for_each_entry_safe(slab, h, &discard, slab_list)
5381 		discard_slab(s, slab);
5382 }
5383 
5384 bool __kmem_cache_empty(struct kmem_cache *s)
5385 {
5386 	int node;
5387 	struct kmem_cache_node *n;
5388 
5389 	for_each_kmem_cache_node(s, node, n)
5390 		if (n->nr_partial || node_nr_slabs(n))
5391 			return false;
5392 	return true;
5393 }
5394 
5395 /*
5396  * Release all resources used by a slab cache.
5397  */
5398 int __kmem_cache_shutdown(struct kmem_cache *s)
5399 {
5400 	int node;
5401 	struct kmem_cache_node *n;
5402 
5403 	flush_all_cpus_locked(s);
5404 	/* Attempt to free all objects */
5405 	for_each_kmem_cache_node(s, node, n) {
5406 		free_partial(s, n);
5407 		if (n->nr_partial || node_nr_slabs(n))
5408 			return 1;
5409 	}
5410 	return 0;
5411 }
5412 
5413 #ifdef CONFIG_PRINTK
5414 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
5415 {
5416 	void *base;
5417 	int __maybe_unused i;
5418 	unsigned int objnr;
5419 	void *objp;
5420 	void *objp0;
5421 	struct kmem_cache *s = slab->slab_cache;
5422 	struct track __maybe_unused *trackp;
5423 
5424 	kpp->kp_ptr = object;
5425 	kpp->kp_slab = slab;
5426 	kpp->kp_slab_cache = s;
5427 	base = slab_address(slab);
5428 	objp0 = kasan_reset_tag(object);
5429 #ifdef CONFIG_SLUB_DEBUG
5430 	objp = restore_red_left(s, objp0);
5431 #else
5432 	objp = objp0;
5433 #endif
5434 	objnr = obj_to_index(s, slab, objp);
5435 	kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
5436 	objp = base + s->size * objnr;
5437 	kpp->kp_objp = objp;
5438 	if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
5439 			 || (objp - base) % s->size) ||
5440 	    !(s->flags & SLAB_STORE_USER))
5441 		return;
5442 #ifdef CONFIG_SLUB_DEBUG
5443 	objp = fixup_red_left(s, objp);
5444 	trackp = get_track(s, objp, TRACK_ALLOC);
5445 	kpp->kp_ret = (void *)trackp->addr;
5446 #ifdef CONFIG_STACKDEPOT
5447 	{
5448 		depot_stack_handle_t handle;
5449 		unsigned long *entries;
5450 		unsigned int nr_entries;
5451 
5452 		handle = READ_ONCE(trackp->handle);
5453 		if (handle) {
5454 			nr_entries = stack_depot_fetch(handle, &entries);
5455 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5456 				kpp->kp_stack[i] = (void *)entries[i];
5457 		}
5458 
5459 		trackp = get_track(s, objp, TRACK_FREE);
5460 		handle = READ_ONCE(trackp->handle);
5461 		if (handle) {
5462 			nr_entries = stack_depot_fetch(handle, &entries);
5463 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5464 				kpp->kp_free_stack[i] = (void *)entries[i];
5465 		}
5466 	}
5467 #endif
5468 #endif
5469 }
5470 #endif
5471 
5472 /********************************************************************
5473  *		Kmalloc subsystem
5474  *******************************************************************/
5475 
5476 static int __init setup_slub_min_order(char *str)
5477 {
5478 	get_option(&str, (int *)&slub_min_order);
5479 
5480 	if (slub_min_order > slub_max_order)
5481 		slub_max_order = slub_min_order;
5482 
5483 	return 1;
5484 }
5485 
5486 __setup("slab_min_order=", setup_slub_min_order);
5487 __setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0);
5488 
5489 
5490 static int __init setup_slub_max_order(char *str)
5491 {
5492 	get_option(&str, (int *)&slub_max_order);
5493 	slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
5494 
5495 	if (slub_min_order > slub_max_order)
5496 		slub_min_order = slub_max_order;
5497 
5498 	return 1;
5499 }
5500 
5501 __setup("slab_max_order=", setup_slub_max_order);
5502 __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0);
5503 
5504 static int __init setup_slub_min_objects(char *str)
5505 {
5506 	get_option(&str, (int *)&slub_min_objects);
5507 
5508 	return 1;
5509 }
5510 
5511 __setup("slab_min_objects=", setup_slub_min_objects);
5512 __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
5513 
5514 #ifdef CONFIG_HARDENED_USERCOPY
5515 /*
5516  * Rejects incorrectly sized objects and objects that are to be copied
5517  * to/from userspace but do not fall entirely within the containing slab
5518  * cache's usercopy region.
5519  *
5520  * Returns NULL if check passes, otherwise const char * to name of cache
5521  * to indicate an error.
5522  */
5523 void __check_heap_object(const void *ptr, unsigned long n,
5524 			 const struct slab *slab, bool to_user)
5525 {
5526 	struct kmem_cache *s;
5527 	unsigned int offset;
5528 	bool is_kfence = is_kfence_address(ptr);
5529 
5530 	ptr = kasan_reset_tag(ptr);
5531 
5532 	/* Find object and usable object size. */
5533 	s = slab->slab_cache;
5534 
5535 	/* Reject impossible pointers. */
5536 	if (ptr < slab_address(slab))
5537 		usercopy_abort("SLUB object not in SLUB page?!", NULL,
5538 			       to_user, 0, n);
5539 
5540 	/* Find offset within object. */
5541 	if (is_kfence)
5542 		offset = ptr - kfence_object_start(ptr);
5543 	else
5544 		offset = (ptr - slab_address(slab)) % s->size;
5545 
5546 	/* Adjust for redzone and reject if within the redzone. */
5547 	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
5548 		if (offset < s->red_left_pad)
5549 			usercopy_abort("SLUB object in left red zone",
5550 				       s->name, to_user, offset, n);
5551 		offset -= s->red_left_pad;
5552 	}
5553 
5554 	/* Allow address range falling entirely within usercopy region. */
5555 	if (offset >= s->useroffset &&
5556 	    offset - s->useroffset <= s->usersize &&
5557 	    n <= s->useroffset - offset + s->usersize)
5558 		return;
5559 
5560 	usercopy_abort("SLUB object", s->name, to_user, offset, n);
5561 }
5562 #endif /* CONFIG_HARDENED_USERCOPY */
5563 
5564 #define SHRINK_PROMOTE_MAX 32
5565 
5566 /*
5567  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
5568  * up most to the head of the partial lists. New allocations will then
5569  * fill those up and thus they can be removed from the partial lists.
5570  *
5571  * The slabs with the least items are placed last. This results in them
5572  * being allocated from last increasing the chance that the last objects
5573  * are freed in them.
5574  */
5575 static int __kmem_cache_do_shrink(struct kmem_cache *s)
5576 {
5577 	int node;
5578 	int i;
5579 	struct kmem_cache_node *n;
5580 	struct slab *slab;
5581 	struct slab *t;
5582 	struct list_head discard;
5583 	struct list_head promote[SHRINK_PROMOTE_MAX];
5584 	unsigned long flags;
5585 	int ret = 0;
5586 
5587 	for_each_kmem_cache_node(s, node, n) {
5588 		INIT_LIST_HEAD(&discard);
5589 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
5590 			INIT_LIST_HEAD(promote + i);
5591 
5592 		spin_lock_irqsave(&n->list_lock, flags);
5593 
5594 		/*
5595 		 * Build lists of slabs to discard or promote.
5596 		 *
5597 		 * Note that concurrent frees may occur while we hold the
5598 		 * list_lock. slab->inuse here is the upper limit.
5599 		 */
5600 		list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
5601 			int free = slab->objects - slab->inuse;
5602 
5603 			/* Do not reread slab->inuse */
5604 			barrier();
5605 
5606 			/* We do not keep full slabs on the list */
5607 			BUG_ON(free <= 0);
5608 
5609 			if (free == slab->objects) {
5610 				list_move(&slab->slab_list, &discard);
5611 				slab_clear_node_partial(slab);
5612 				n->nr_partial--;
5613 				dec_slabs_node(s, node, slab->objects);
5614 			} else if (free <= SHRINK_PROMOTE_MAX)
5615 				list_move(&slab->slab_list, promote + free - 1);
5616 		}
5617 
5618 		/*
5619 		 * Promote the slabs filled up most to the head of the
5620 		 * partial list.
5621 		 */
5622 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
5623 			list_splice(promote + i, &n->partial);
5624 
5625 		spin_unlock_irqrestore(&n->list_lock, flags);
5626 
5627 		/* Release empty slabs */
5628 		list_for_each_entry_safe(slab, t, &discard, slab_list)
5629 			free_slab(s, slab);
5630 
5631 		if (node_nr_slabs(n))
5632 			ret = 1;
5633 	}
5634 
5635 	return ret;
5636 }
5637 
5638 int __kmem_cache_shrink(struct kmem_cache *s)
5639 {
5640 	flush_all(s);
5641 	return __kmem_cache_do_shrink(s);
5642 }
5643 
5644 static int slab_mem_going_offline_callback(void *arg)
5645 {
5646 	struct kmem_cache *s;
5647 
5648 	mutex_lock(&slab_mutex);
5649 	list_for_each_entry(s, &slab_caches, list) {
5650 		flush_all_cpus_locked(s);
5651 		__kmem_cache_do_shrink(s);
5652 	}
5653 	mutex_unlock(&slab_mutex);
5654 
5655 	return 0;
5656 }
5657 
5658 static void slab_mem_offline_callback(void *arg)
5659 {
5660 	struct memory_notify *marg = arg;
5661 	int offline_node;
5662 
5663 	offline_node = marg->status_change_nid_normal;
5664 
5665 	/*
5666 	 * If the node still has available memory. we need kmem_cache_node
5667 	 * for it yet.
5668 	 */
5669 	if (offline_node < 0)
5670 		return;
5671 
5672 	mutex_lock(&slab_mutex);
5673 	node_clear(offline_node, slab_nodes);
5674 	/*
5675 	 * We no longer free kmem_cache_node structures here, as it would be
5676 	 * racy with all get_node() users, and infeasible to protect them with
5677 	 * slab_mutex.
5678 	 */
5679 	mutex_unlock(&slab_mutex);
5680 }
5681 
5682 static int slab_mem_going_online_callback(void *arg)
5683 {
5684 	struct kmem_cache_node *n;
5685 	struct kmem_cache *s;
5686 	struct memory_notify *marg = arg;
5687 	int nid = marg->status_change_nid_normal;
5688 	int ret = 0;
5689 
5690 	/*
5691 	 * If the node's memory is already available, then kmem_cache_node is
5692 	 * already created. Nothing to do.
5693 	 */
5694 	if (nid < 0)
5695 		return 0;
5696 
5697 	/*
5698 	 * We are bringing a node online. No memory is available yet. We must
5699 	 * allocate a kmem_cache_node structure in order to bring the node
5700 	 * online.
5701 	 */
5702 	mutex_lock(&slab_mutex);
5703 	list_for_each_entry(s, &slab_caches, list) {
5704 		/*
5705 		 * The structure may already exist if the node was previously
5706 		 * onlined and offlined.
5707 		 */
5708 		if (get_node(s, nid))
5709 			continue;
5710 		/*
5711 		 * XXX: kmem_cache_alloc_node will fallback to other nodes
5712 		 *      since memory is not yet available from the node that
5713 		 *      is brought up.
5714 		 */
5715 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
5716 		if (!n) {
5717 			ret = -ENOMEM;
5718 			goto out;
5719 		}
5720 		init_kmem_cache_node(n);
5721 		s->node[nid] = n;
5722 	}
5723 	/*
5724 	 * Any cache created after this point will also have kmem_cache_node
5725 	 * initialized for the new node.
5726 	 */
5727 	node_set(nid, slab_nodes);
5728 out:
5729 	mutex_unlock(&slab_mutex);
5730 	return ret;
5731 }
5732 
5733 static int slab_memory_callback(struct notifier_block *self,
5734 				unsigned long action, void *arg)
5735 {
5736 	int ret = 0;
5737 
5738 	switch (action) {
5739 	case MEM_GOING_ONLINE:
5740 		ret = slab_mem_going_online_callback(arg);
5741 		break;
5742 	case MEM_GOING_OFFLINE:
5743 		ret = slab_mem_going_offline_callback(arg);
5744 		break;
5745 	case MEM_OFFLINE:
5746 	case MEM_CANCEL_ONLINE:
5747 		slab_mem_offline_callback(arg);
5748 		break;
5749 	case MEM_ONLINE:
5750 	case MEM_CANCEL_OFFLINE:
5751 		break;
5752 	}
5753 	if (ret)
5754 		ret = notifier_from_errno(ret);
5755 	else
5756 		ret = NOTIFY_OK;
5757 	return ret;
5758 }
5759 
5760 /********************************************************************
5761  *			Basic setup of slabs
5762  *******************************************************************/
5763 
5764 /*
5765  * Used for early kmem_cache structures that were allocated using
5766  * the page allocator. Allocate them properly then fix up the pointers
5767  * that may be pointing to the wrong kmem_cache structure.
5768  */
5769 
5770 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
5771 {
5772 	int node;
5773 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
5774 	struct kmem_cache_node *n;
5775 
5776 	memcpy(s, static_cache, kmem_cache->object_size);
5777 
5778 	/*
5779 	 * This runs very early, and only the boot processor is supposed to be
5780 	 * up.  Even if it weren't true, IRQs are not up so we couldn't fire
5781 	 * IPIs around.
5782 	 */
5783 	__flush_cpu_slab(s, smp_processor_id());
5784 	for_each_kmem_cache_node(s, node, n) {
5785 		struct slab *p;
5786 
5787 		list_for_each_entry(p, &n->partial, slab_list)
5788 			p->slab_cache = s;
5789 
5790 #ifdef CONFIG_SLUB_DEBUG
5791 		list_for_each_entry(p, &n->full, slab_list)
5792 			p->slab_cache = s;
5793 #endif
5794 	}
5795 	list_add(&s->list, &slab_caches);
5796 	return s;
5797 }
5798 
5799 void __init kmem_cache_init(void)
5800 {
5801 	static __initdata struct kmem_cache boot_kmem_cache,
5802 		boot_kmem_cache_node;
5803 	int node;
5804 
5805 	if (debug_guardpage_minorder())
5806 		slub_max_order = 0;
5807 
5808 	/* Print slub debugging pointers without hashing */
5809 	if (__slub_debug_enabled())
5810 		no_hash_pointers_enable(NULL);
5811 
5812 	kmem_cache_node = &boot_kmem_cache_node;
5813 	kmem_cache = &boot_kmem_cache;
5814 
5815 	/*
5816 	 * Initialize the nodemask for which we will allocate per node
5817 	 * structures. Here we don't need taking slab_mutex yet.
5818 	 */
5819 	for_each_node_state(node, N_NORMAL_MEMORY)
5820 		node_set(node, slab_nodes);
5821 
5822 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
5823 			sizeof(struct kmem_cache_node),
5824 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
5825 
5826 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
5827 
5828 	/* Able to allocate the per node structures */
5829 	slab_state = PARTIAL;
5830 
5831 	create_boot_cache(kmem_cache, "kmem_cache",
5832 			offsetof(struct kmem_cache, node) +
5833 				nr_node_ids * sizeof(struct kmem_cache_node *),
5834 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
5835 
5836 	kmem_cache = bootstrap(&boot_kmem_cache);
5837 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
5838 
5839 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
5840 	setup_kmalloc_cache_index_table();
5841 	create_kmalloc_caches();
5842 
5843 	/* Setup random freelists for each cache */
5844 	init_freelist_randomization();
5845 
5846 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
5847 				  slub_cpu_dead);
5848 
5849 	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
5850 		cache_line_size(),
5851 		slub_min_order, slub_max_order, slub_min_objects,
5852 		nr_cpu_ids, nr_node_ids);
5853 }
5854 
5855 void __init kmem_cache_init_late(void)
5856 {
5857 #ifndef CONFIG_SLUB_TINY
5858 	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
5859 	WARN_ON(!flushwq);
5860 #endif
5861 }
5862 
5863 struct kmem_cache *
5864 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
5865 		   slab_flags_t flags, void (*ctor)(void *))
5866 {
5867 	struct kmem_cache *s;
5868 
5869 	s = find_mergeable(size, align, flags, name, ctor);
5870 	if (s) {
5871 		if (sysfs_slab_alias(s, name))
5872 			return NULL;
5873 
5874 		s->refcount++;
5875 
5876 		/*
5877 		 * Adjust the object sizes so that we clear
5878 		 * the complete object on kzalloc.
5879 		 */
5880 		s->object_size = max(s->object_size, size);
5881 		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
5882 	}
5883 
5884 	return s;
5885 }
5886 
5887 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
5888 {
5889 	int err;
5890 
5891 	err = kmem_cache_open(s, flags);
5892 	if (err)
5893 		return err;
5894 
5895 	/* Mutex is not taken during early boot */
5896 	if (slab_state <= UP)
5897 		return 0;
5898 
5899 	err = sysfs_slab_add(s);
5900 	if (err) {
5901 		__kmem_cache_release(s);
5902 		return err;
5903 	}
5904 
5905 	if (s->flags & SLAB_STORE_USER)
5906 		debugfs_slab_add(s);
5907 
5908 	return 0;
5909 }
5910 
5911 #ifdef SLAB_SUPPORTS_SYSFS
5912 static int count_inuse(struct slab *slab)
5913 {
5914 	return slab->inuse;
5915 }
5916 
5917 static int count_total(struct slab *slab)
5918 {
5919 	return slab->objects;
5920 }
5921 #endif
5922 
5923 #ifdef CONFIG_SLUB_DEBUG
5924 static void validate_slab(struct kmem_cache *s, struct slab *slab,
5925 			  unsigned long *obj_map)
5926 {
5927 	void *p;
5928 	void *addr = slab_address(slab);
5929 
5930 	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
5931 		return;
5932 
5933 	/* Now we know that a valid freelist exists */
5934 	__fill_map(obj_map, s, slab);
5935 	for_each_object(p, s, addr, slab->objects) {
5936 		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
5937 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
5938 
5939 		if (!check_object(s, slab, p, val))
5940 			break;
5941 	}
5942 }
5943 
5944 static int validate_slab_node(struct kmem_cache *s,
5945 		struct kmem_cache_node *n, unsigned long *obj_map)
5946 {
5947 	unsigned long count = 0;
5948 	struct slab *slab;
5949 	unsigned long flags;
5950 
5951 	spin_lock_irqsave(&n->list_lock, flags);
5952 
5953 	list_for_each_entry(slab, &n->partial, slab_list) {
5954 		validate_slab(s, slab, obj_map);
5955 		count++;
5956 	}
5957 	if (count != n->nr_partial) {
5958 		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
5959 		       s->name, count, n->nr_partial);
5960 		slab_add_kunit_errors();
5961 	}
5962 
5963 	if (!(s->flags & SLAB_STORE_USER))
5964 		goto out;
5965 
5966 	list_for_each_entry(slab, &n->full, slab_list) {
5967 		validate_slab(s, slab, obj_map);
5968 		count++;
5969 	}
5970 	if (count != node_nr_slabs(n)) {
5971 		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
5972 		       s->name, count, node_nr_slabs(n));
5973 		slab_add_kunit_errors();
5974 	}
5975 
5976 out:
5977 	spin_unlock_irqrestore(&n->list_lock, flags);
5978 	return count;
5979 }
5980 
5981 long validate_slab_cache(struct kmem_cache *s)
5982 {
5983 	int node;
5984 	unsigned long count = 0;
5985 	struct kmem_cache_node *n;
5986 	unsigned long *obj_map;
5987 
5988 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
5989 	if (!obj_map)
5990 		return -ENOMEM;
5991 
5992 	flush_all(s);
5993 	for_each_kmem_cache_node(s, node, n)
5994 		count += validate_slab_node(s, n, obj_map);
5995 
5996 	bitmap_free(obj_map);
5997 
5998 	return count;
5999 }
6000 EXPORT_SYMBOL(validate_slab_cache);
6001 
6002 #ifdef CONFIG_DEBUG_FS
6003 /*
6004  * Generate lists of code addresses where slabcache objects are allocated
6005  * and freed.
6006  */
6007 
6008 struct location {
6009 	depot_stack_handle_t handle;
6010 	unsigned long count;
6011 	unsigned long addr;
6012 	unsigned long waste;
6013 	long long sum_time;
6014 	long min_time;
6015 	long max_time;
6016 	long min_pid;
6017 	long max_pid;
6018 	DECLARE_BITMAP(cpus, NR_CPUS);
6019 	nodemask_t nodes;
6020 };
6021 
6022 struct loc_track {
6023 	unsigned long max;
6024 	unsigned long count;
6025 	struct location *loc;
6026 	loff_t idx;
6027 };
6028 
6029 static struct dentry *slab_debugfs_root;
6030 
6031 static void free_loc_track(struct loc_track *t)
6032 {
6033 	if (t->max)
6034 		free_pages((unsigned long)t->loc,
6035 			get_order(sizeof(struct location) * t->max));
6036 }
6037 
6038 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
6039 {
6040 	struct location *l;
6041 	int order;
6042 
6043 	order = get_order(sizeof(struct location) * max);
6044 
6045 	l = (void *)__get_free_pages(flags, order);
6046 	if (!l)
6047 		return 0;
6048 
6049 	if (t->count) {
6050 		memcpy(l, t->loc, sizeof(struct location) * t->count);
6051 		free_loc_track(t);
6052 	}
6053 	t->max = max;
6054 	t->loc = l;
6055 	return 1;
6056 }
6057 
6058 static int add_location(struct loc_track *t, struct kmem_cache *s,
6059 				const struct track *track,
6060 				unsigned int orig_size)
6061 {
6062 	long start, end, pos;
6063 	struct location *l;
6064 	unsigned long caddr, chandle, cwaste;
6065 	unsigned long age = jiffies - track->when;
6066 	depot_stack_handle_t handle = 0;
6067 	unsigned int waste = s->object_size - orig_size;
6068 
6069 #ifdef CONFIG_STACKDEPOT
6070 	handle = READ_ONCE(track->handle);
6071 #endif
6072 	start = -1;
6073 	end = t->count;
6074 
6075 	for ( ; ; ) {
6076 		pos = start + (end - start + 1) / 2;
6077 
6078 		/*
6079 		 * There is nothing at "end". If we end up there
6080 		 * we need to add something to before end.
6081 		 */
6082 		if (pos == end)
6083 			break;
6084 
6085 		l = &t->loc[pos];
6086 		caddr = l->addr;
6087 		chandle = l->handle;
6088 		cwaste = l->waste;
6089 		if ((track->addr == caddr) && (handle == chandle) &&
6090 			(waste == cwaste)) {
6091 
6092 			l->count++;
6093 			if (track->when) {
6094 				l->sum_time += age;
6095 				if (age < l->min_time)
6096 					l->min_time = age;
6097 				if (age > l->max_time)
6098 					l->max_time = age;
6099 
6100 				if (track->pid < l->min_pid)
6101 					l->min_pid = track->pid;
6102 				if (track->pid > l->max_pid)
6103 					l->max_pid = track->pid;
6104 
6105 				cpumask_set_cpu(track->cpu,
6106 						to_cpumask(l->cpus));
6107 			}
6108 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
6109 			return 1;
6110 		}
6111 
6112 		if (track->addr < caddr)
6113 			end = pos;
6114 		else if (track->addr == caddr && handle < chandle)
6115 			end = pos;
6116 		else if (track->addr == caddr && handle == chandle &&
6117 				waste < cwaste)
6118 			end = pos;
6119 		else
6120 			start = pos;
6121 	}
6122 
6123 	/*
6124 	 * Not found. Insert new tracking element.
6125 	 */
6126 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
6127 		return 0;
6128 
6129 	l = t->loc + pos;
6130 	if (pos < t->count)
6131 		memmove(l + 1, l,
6132 			(t->count - pos) * sizeof(struct location));
6133 	t->count++;
6134 	l->count = 1;
6135 	l->addr = track->addr;
6136 	l->sum_time = age;
6137 	l->min_time = age;
6138 	l->max_time = age;
6139 	l->min_pid = track->pid;
6140 	l->max_pid = track->pid;
6141 	l->handle = handle;
6142 	l->waste = waste;
6143 	cpumask_clear(to_cpumask(l->cpus));
6144 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
6145 	nodes_clear(l->nodes);
6146 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
6147 	return 1;
6148 }
6149 
6150 static void process_slab(struct loc_track *t, struct kmem_cache *s,
6151 		struct slab *slab, enum track_item alloc,
6152 		unsigned long *obj_map)
6153 {
6154 	void *addr = slab_address(slab);
6155 	bool is_alloc = (alloc == TRACK_ALLOC);
6156 	void *p;
6157 
6158 	__fill_map(obj_map, s, slab);
6159 
6160 	for_each_object(p, s, addr, slab->objects)
6161 		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
6162 			add_location(t, s, get_track(s, p, alloc),
6163 				     is_alloc ? get_orig_size(s, p) :
6164 						s->object_size);
6165 }
6166 #endif  /* CONFIG_DEBUG_FS   */
6167 #endif	/* CONFIG_SLUB_DEBUG */
6168 
6169 #ifdef SLAB_SUPPORTS_SYSFS
6170 enum slab_stat_type {
6171 	SL_ALL,			/* All slabs */
6172 	SL_PARTIAL,		/* Only partially allocated slabs */
6173 	SL_CPU,			/* Only slabs used for cpu caches */
6174 	SL_OBJECTS,		/* Determine allocated objects not slabs */
6175 	SL_TOTAL		/* Determine object capacity not slabs */
6176 };
6177 
6178 #define SO_ALL		(1 << SL_ALL)
6179 #define SO_PARTIAL	(1 << SL_PARTIAL)
6180 #define SO_CPU		(1 << SL_CPU)
6181 #define SO_OBJECTS	(1 << SL_OBJECTS)
6182 #define SO_TOTAL	(1 << SL_TOTAL)
6183 
6184 static ssize_t show_slab_objects(struct kmem_cache *s,
6185 				 char *buf, unsigned long flags)
6186 {
6187 	unsigned long total = 0;
6188 	int node;
6189 	int x;
6190 	unsigned long *nodes;
6191 	int len = 0;
6192 
6193 	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
6194 	if (!nodes)
6195 		return -ENOMEM;
6196 
6197 	if (flags & SO_CPU) {
6198 		int cpu;
6199 
6200 		for_each_possible_cpu(cpu) {
6201 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
6202 							       cpu);
6203 			int node;
6204 			struct slab *slab;
6205 
6206 			slab = READ_ONCE(c->slab);
6207 			if (!slab)
6208 				continue;
6209 
6210 			node = slab_nid(slab);
6211 			if (flags & SO_TOTAL)
6212 				x = slab->objects;
6213 			else if (flags & SO_OBJECTS)
6214 				x = slab->inuse;
6215 			else
6216 				x = 1;
6217 
6218 			total += x;
6219 			nodes[node] += x;
6220 
6221 #ifdef CONFIG_SLUB_CPU_PARTIAL
6222 			slab = slub_percpu_partial_read_once(c);
6223 			if (slab) {
6224 				node = slab_nid(slab);
6225 				if (flags & SO_TOTAL)
6226 					WARN_ON_ONCE(1);
6227 				else if (flags & SO_OBJECTS)
6228 					WARN_ON_ONCE(1);
6229 				else
6230 					x = data_race(slab->slabs);
6231 				total += x;
6232 				nodes[node] += x;
6233 			}
6234 #endif
6235 		}
6236 	}
6237 
6238 	/*
6239 	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
6240 	 * already held which will conflict with an existing lock order:
6241 	 *
6242 	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
6243 	 *
6244 	 * We don't really need mem_hotplug_lock (to hold off
6245 	 * slab_mem_going_offline_callback) here because slab's memory hot
6246 	 * unplug code doesn't destroy the kmem_cache->node[] data.
6247 	 */
6248 
6249 #ifdef CONFIG_SLUB_DEBUG
6250 	if (flags & SO_ALL) {
6251 		struct kmem_cache_node *n;
6252 
6253 		for_each_kmem_cache_node(s, node, n) {
6254 
6255 			if (flags & SO_TOTAL)
6256 				x = node_nr_objs(n);
6257 			else if (flags & SO_OBJECTS)
6258 				x = node_nr_objs(n) - count_partial(n, count_free);
6259 			else
6260 				x = node_nr_slabs(n);
6261 			total += x;
6262 			nodes[node] += x;
6263 		}
6264 
6265 	} else
6266 #endif
6267 	if (flags & SO_PARTIAL) {
6268 		struct kmem_cache_node *n;
6269 
6270 		for_each_kmem_cache_node(s, node, n) {
6271 			if (flags & SO_TOTAL)
6272 				x = count_partial(n, count_total);
6273 			else if (flags & SO_OBJECTS)
6274 				x = count_partial(n, count_inuse);
6275 			else
6276 				x = n->nr_partial;
6277 			total += x;
6278 			nodes[node] += x;
6279 		}
6280 	}
6281 
6282 	len += sysfs_emit_at(buf, len, "%lu", total);
6283 #ifdef CONFIG_NUMA
6284 	for (node = 0; node < nr_node_ids; node++) {
6285 		if (nodes[node])
6286 			len += sysfs_emit_at(buf, len, " N%d=%lu",
6287 					     node, nodes[node]);
6288 	}
6289 #endif
6290 	len += sysfs_emit_at(buf, len, "\n");
6291 	kfree(nodes);
6292 
6293 	return len;
6294 }
6295 
6296 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
6297 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
6298 
6299 struct slab_attribute {
6300 	struct attribute attr;
6301 	ssize_t (*show)(struct kmem_cache *s, char *buf);
6302 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
6303 };
6304 
6305 #define SLAB_ATTR_RO(_name) \
6306 	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
6307 
6308 #define SLAB_ATTR(_name) \
6309 	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
6310 
6311 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
6312 {
6313 	return sysfs_emit(buf, "%u\n", s->size);
6314 }
6315 SLAB_ATTR_RO(slab_size);
6316 
6317 static ssize_t align_show(struct kmem_cache *s, char *buf)
6318 {
6319 	return sysfs_emit(buf, "%u\n", s->align);
6320 }
6321 SLAB_ATTR_RO(align);
6322 
6323 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
6324 {
6325 	return sysfs_emit(buf, "%u\n", s->object_size);
6326 }
6327 SLAB_ATTR_RO(object_size);
6328 
6329 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
6330 {
6331 	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
6332 }
6333 SLAB_ATTR_RO(objs_per_slab);
6334 
6335 static ssize_t order_show(struct kmem_cache *s, char *buf)
6336 {
6337 	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
6338 }
6339 SLAB_ATTR_RO(order);
6340 
6341 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
6342 {
6343 	return sysfs_emit(buf, "%lu\n", s->min_partial);
6344 }
6345 
6346 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
6347 				 size_t length)
6348 {
6349 	unsigned long min;
6350 	int err;
6351 
6352 	err = kstrtoul(buf, 10, &min);
6353 	if (err)
6354 		return err;
6355 
6356 	s->min_partial = min;
6357 	return length;
6358 }
6359 SLAB_ATTR(min_partial);
6360 
6361 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
6362 {
6363 	unsigned int nr_partial = 0;
6364 #ifdef CONFIG_SLUB_CPU_PARTIAL
6365 	nr_partial = s->cpu_partial;
6366 #endif
6367 
6368 	return sysfs_emit(buf, "%u\n", nr_partial);
6369 }
6370 
6371 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
6372 				 size_t length)
6373 {
6374 	unsigned int objects;
6375 	int err;
6376 
6377 	err = kstrtouint(buf, 10, &objects);
6378 	if (err)
6379 		return err;
6380 	if (objects && !kmem_cache_has_cpu_partial(s))
6381 		return -EINVAL;
6382 
6383 	slub_set_cpu_partial(s, objects);
6384 	flush_all(s);
6385 	return length;
6386 }
6387 SLAB_ATTR(cpu_partial);
6388 
6389 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
6390 {
6391 	if (!s->ctor)
6392 		return 0;
6393 	return sysfs_emit(buf, "%pS\n", s->ctor);
6394 }
6395 SLAB_ATTR_RO(ctor);
6396 
6397 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
6398 {
6399 	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
6400 }
6401 SLAB_ATTR_RO(aliases);
6402 
6403 static ssize_t partial_show(struct kmem_cache *s, char *buf)
6404 {
6405 	return show_slab_objects(s, buf, SO_PARTIAL);
6406 }
6407 SLAB_ATTR_RO(partial);
6408 
6409 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
6410 {
6411 	return show_slab_objects(s, buf, SO_CPU);
6412 }
6413 SLAB_ATTR_RO(cpu_slabs);
6414 
6415 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
6416 {
6417 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
6418 }
6419 SLAB_ATTR_RO(objects_partial);
6420 
6421 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
6422 {
6423 	int objects = 0;
6424 	int slabs = 0;
6425 	int cpu __maybe_unused;
6426 	int len = 0;
6427 
6428 #ifdef CONFIG_SLUB_CPU_PARTIAL
6429 	for_each_online_cpu(cpu) {
6430 		struct slab *slab;
6431 
6432 		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6433 
6434 		if (slab)
6435 			slabs += data_race(slab->slabs);
6436 	}
6437 #endif
6438 
6439 	/* Approximate half-full slabs, see slub_set_cpu_partial() */
6440 	objects = (slabs * oo_objects(s->oo)) / 2;
6441 	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
6442 
6443 #ifdef CONFIG_SLUB_CPU_PARTIAL
6444 	for_each_online_cpu(cpu) {
6445 		struct slab *slab;
6446 
6447 		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6448 		if (slab) {
6449 			slabs = data_race(slab->slabs);
6450 			objects = (slabs * oo_objects(s->oo)) / 2;
6451 			len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
6452 					     cpu, objects, slabs);
6453 		}
6454 	}
6455 #endif
6456 	len += sysfs_emit_at(buf, len, "\n");
6457 
6458 	return len;
6459 }
6460 SLAB_ATTR_RO(slabs_cpu_partial);
6461 
6462 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
6463 {
6464 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
6465 }
6466 SLAB_ATTR_RO(reclaim_account);
6467 
6468 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
6469 {
6470 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
6471 }
6472 SLAB_ATTR_RO(hwcache_align);
6473 
6474 #ifdef CONFIG_ZONE_DMA
6475 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
6476 {
6477 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
6478 }
6479 SLAB_ATTR_RO(cache_dma);
6480 #endif
6481 
6482 #ifdef CONFIG_HARDENED_USERCOPY
6483 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
6484 {
6485 	return sysfs_emit(buf, "%u\n", s->usersize);
6486 }
6487 SLAB_ATTR_RO(usersize);
6488 #endif
6489 
6490 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
6491 {
6492 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
6493 }
6494 SLAB_ATTR_RO(destroy_by_rcu);
6495 
6496 #ifdef CONFIG_SLUB_DEBUG
6497 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
6498 {
6499 	return show_slab_objects(s, buf, SO_ALL);
6500 }
6501 SLAB_ATTR_RO(slabs);
6502 
6503 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
6504 {
6505 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
6506 }
6507 SLAB_ATTR_RO(total_objects);
6508 
6509 static ssize_t objects_show(struct kmem_cache *s, char *buf)
6510 {
6511 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
6512 }
6513 SLAB_ATTR_RO(objects);
6514 
6515 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
6516 {
6517 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
6518 }
6519 SLAB_ATTR_RO(sanity_checks);
6520 
6521 static ssize_t trace_show(struct kmem_cache *s, char *buf)
6522 {
6523 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
6524 }
6525 SLAB_ATTR_RO(trace);
6526 
6527 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
6528 {
6529 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
6530 }
6531 
6532 SLAB_ATTR_RO(red_zone);
6533 
6534 static ssize_t poison_show(struct kmem_cache *s, char *buf)
6535 {
6536 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
6537 }
6538 
6539 SLAB_ATTR_RO(poison);
6540 
6541 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
6542 {
6543 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
6544 }
6545 
6546 SLAB_ATTR_RO(store_user);
6547 
6548 static ssize_t validate_show(struct kmem_cache *s, char *buf)
6549 {
6550 	return 0;
6551 }
6552 
6553 static ssize_t validate_store(struct kmem_cache *s,
6554 			const char *buf, size_t length)
6555 {
6556 	int ret = -EINVAL;
6557 
6558 	if (buf[0] == '1' && kmem_cache_debug(s)) {
6559 		ret = validate_slab_cache(s);
6560 		if (ret >= 0)
6561 			ret = length;
6562 	}
6563 	return ret;
6564 }
6565 SLAB_ATTR(validate);
6566 
6567 #endif /* CONFIG_SLUB_DEBUG */
6568 
6569 #ifdef CONFIG_FAILSLAB
6570 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
6571 {
6572 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
6573 }
6574 
6575 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
6576 				size_t length)
6577 {
6578 	if (s->refcount > 1)
6579 		return -EINVAL;
6580 
6581 	if (buf[0] == '1')
6582 		WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
6583 	else
6584 		WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
6585 
6586 	return length;
6587 }
6588 SLAB_ATTR(failslab);
6589 #endif
6590 
6591 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
6592 {
6593 	return 0;
6594 }
6595 
6596 static ssize_t shrink_store(struct kmem_cache *s,
6597 			const char *buf, size_t length)
6598 {
6599 	if (buf[0] == '1')
6600 		kmem_cache_shrink(s);
6601 	else
6602 		return -EINVAL;
6603 	return length;
6604 }
6605 SLAB_ATTR(shrink);
6606 
6607 #ifdef CONFIG_NUMA
6608 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
6609 {
6610 	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
6611 }
6612 
6613 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
6614 				const char *buf, size_t length)
6615 {
6616 	unsigned int ratio;
6617 	int err;
6618 
6619 	err = kstrtouint(buf, 10, &ratio);
6620 	if (err)
6621 		return err;
6622 	if (ratio > 100)
6623 		return -ERANGE;
6624 
6625 	s->remote_node_defrag_ratio = ratio * 10;
6626 
6627 	return length;
6628 }
6629 SLAB_ATTR(remote_node_defrag_ratio);
6630 #endif
6631 
6632 #ifdef CONFIG_SLUB_STATS
6633 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
6634 {
6635 	unsigned long sum  = 0;
6636 	int cpu;
6637 	int len = 0;
6638 	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
6639 
6640 	if (!data)
6641 		return -ENOMEM;
6642 
6643 	for_each_online_cpu(cpu) {
6644 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
6645 
6646 		data[cpu] = x;
6647 		sum += x;
6648 	}
6649 
6650 	len += sysfs_emit_at(buf, len, "%lu", sum);
6651 
6652 #ifdef CONFIG_SMP
6653 	for_each_online_cpu(cpu) {
6654 		if (data[cpu])
6655 			len += sysfs_emit_at(buf, len, " C%d=%u",
6656 					     cpu, data[cpu]);
6657 	}
6658 #endif
6659 	kfree(data);
6660 	len += sysfs_emit_at(buf, len, "\n");
6661 
6662 	return len;
6663 }
6664 
6665 static void clear_stat(struct kmem_cache *s, enum stat_item si)
6666 {
6667 	int cpu;
6668 
6669 	for_each_online_cpu(cpu)
6670 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
6671 }
6672 
6673 #define STAT_ATTR(si, text) 					\
6674 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
6675 {								\
6676 	return show_stat(s, buf, si);				\
6677 }								\
6678 static ssize_t text##_store(struct kmem_cache *s,		\
6679 				const char *buf, size_t length)	\
6680 {								\
6681 	if (buf[0] != '0')					\
6682 		return -EINVAL;					\
6683 	clear_stat(s, si);					\
6684 	return length;						\
6685 }								\
6686 SLAB_ATTR(text);						\
6687 
6688 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
6689 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
6690 STAT_ATTR(FREE_FASTPATH, free_fastpath);
6691 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
6692 STAT_ATTR(FREE_FROZEN, free_frozen);
6693 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
6694 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
6695 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
6696 STAT_ATTR(ALLOC_SLAB, alloc_slab);
6697 STAT_ATTR(ALLOC_REFILL, alloc_refill);
6698 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
6699 STAT_ATTR(FREE_SLAB, free_slab);
6700 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
6701 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
6702 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
6703 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
6704 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
6705 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
6706 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
6707 STAT_ATTR(ORDER_FALLBACK, order_fallback);
6708 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
6709 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
6710 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
6711 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
6712 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
6713 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
6714 #endif	/* CONFIG_SLUB_STATS */
6715 
6716 #ifdef CONFIG_KFENCE
6717 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
6718 {
6719 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
6720 }
6721 
6722 static ssize_t skip_kfence_store(struct kmem_cache *s,
6723 			const char *buf, size_t length)
6724 {
6725 	int ret = length;
6726 
6727 	if (buf[0] == '0')
6728 		s->flags &= ~SLAB_SKIP_KFENCE;
6729 	else if (buf[0] == '1')
6730 		s->flags |= SLAB_SKIP_KFENCE;
6731 	else
6732 		ret = -EINVAL;
6733 
6734 	return ret;
6735 }
6736 SLAB_ATTR(skip_kfence);
6737 #endif
6738 
6739 static struct attribute *slab_attrs[] = {
6740 	&slab_size_attr.attr,
6741 	&object_size_attr.attr,
6742 	&objs_per_slab_attr.attr,
6743 	&order_attr.attr,
6744 	&min_partial_attr.attr,
6745 	&cpu_partial_attr.attr,
6746 	&objects_partial_attr.attr,
6747 	&partial_attr.attr,
6748 	&cpu_slabs_attr.attr,
6749 	&ctor_attr.attr,
6750 	&aliases_attr.attr,
6751 	&align_attr.attr,
6752 	&hwcache_align_attr.attr,
6753 	&reclaim_account_attr.attr,
6754 	&destroy_by_rcu_attr.attr,
6755 	&shrink_attr.attr,
6756 	&slabs_cpu_partial_attr.attr,
6757 #ifdef CONFIG_SLUB_DEBUG
6758 	&total_objects_attr.attr,
6759 	&objects_attr.attr,
6760 	&slabs_attr.attr,
6761 	&sanity_checks_attr.attr,
6762 	&trace_attr.attr,
6763 	&red_zone_attr.attr,
6764 	&poison_attr.attr,
6765 	&store_user_attr.attr,
6766 	&validate_attr.attr,
6767 #endif
6768 #ifdef CONFIG_ZONE_DMA
6769 	&cache_dma_attr.attr,
6770 #endif
6771 #ifdef CONFIG_NUMA
6772 	&remote_node_defrag_ratio_attr.attr,
6773 #endif
6774 #ifdef CONFIG_SLUB_STATS
6775 	&alloc_fastpath_attr.attr,
6776 	&alloc_slowpath_attr.attr,
6777 	&free_fastpath_attr.attr,
6778 	&free_slowpath_attr.attr,
6779 	&free_frozen_attr.attr,
6780 	&free_add_partial_attr.attr,
6781 	&free_remove_partial_attr.attr,
6782 	&alloc_from_partial_attr.attr,
6783 	&alloc_slab_attr.attr,
6784 	&alloc_refill_attr.attr,
6785 	&alloc_node_mismatch_attr.attr,
6786 	&free_slab_attr.attr,
6787 	&cpuslab_flush_attr.attr,
6788 	&deactivate_full_attr.attr,
6789 	&deactivate_empty_attr.attr,
6790 	&deactivate_to_head_attr.attr,
6791 	&deactivate_to_tail_attr.attr,
6792 	&deactivate_remote_frees_attr.attr,
6793 	&deactivate_bypass_attr.attr,
6794 	&order_fallback_attr.attr,
6795 	&cmpxchg_double_fail_attr.attr,
6796 	&cmpxchg_double_cpu_fail_attr.attr,
6797 	&cpu_partial_alloc_attr.attr,
6798 	&cpu_partial_free_attr.attr,
6799 	&cpu_partial_node_attr.attr,
6800 	&cpu_partial_drain_attr.attr,
6801 #endif
6802 #ifdef CONFIG_FAILSLAB
6803 	&failslab_attr.attr,
6804 #endif
6805 #ifdef CONFIG_HARDENED_USERCOPY
6806 	&usersize_attr.attr,
6807 #endif
6808 #ifdef CONFIG_KFENCE
6809 	&skip_kfence_attr.attr,
6810 #endif
6811 
6812 	NULL
6813 };
6814 
6815 static const struct attribute_group slab_attr_group = {
6816 	.attrs = slab_attrs,
6817 };
6818 
6819 static ssize_t slab_attr_show(struct kobject *kobj,
6820 				struct attribute *attr,
6821 				char *buf)
6822 {
6823 	struct slab_attribute *attribute;
6824 	struct kmem_cache *s;
6825 
6826 	attribute = to_slab_attr(attr);
6827 	s = to_slab(kobj);
6828 
6829 	if (!attribute->show)
6830 		return -EIO;
6831 
6832 	return attribute->show(s, buf);
6833 }
6834 
6835 static ssize_t slab_attr_store(struct kobject *kobj,
6836 				struct attribute *attr,
6837 				const char *buf, size_t len)
6838 {
6839 	struct slab_attribute *attribute;
6840 	struct kmem_cache *s;
6841 
6842 	attribute = to_slab_attr(attr);
6843 	s = to_slab(kobj);
6844 
6845 	if (!attribute->store)
6846 		return -EIO;
6847 
6848 	return attribute->store(s, buf, len);
6849 }
6850 
6851 static void kmem_cache_release(struct kobject *k)
6852 {
6853 	slab_kmem_cache_release(to_slab(k));
6854 }
6855 
6856 static const struct sysfs_ops slab_sysfs_ops = {
6857 	.show = slab_attr_show,
6858 	.store = slab_attr_store,
6859 };
6860 
6861 static const struct kobj_type slab_ktype = {
6862 	.sysfs_ops = &slab_sysfs_ops,
6863 	.release = kmem_cache_release,
6864 };
6865 
6866 static struct kset *slab_kset;
6867 
6868 static inline struct kset *cache_kset(struct kmem_cache *s)
6869 {
6870 	return slab_kset;
6871 }
6872 
6873 #define ID_STR_LENGTH 32
6874 
6875 /* Create a unique string id for a slab cache:
6876  *
6877  * Format	:[flags-]size
6878  */
6879 static char *create_unique_id(struct kmem_cache *s)
6880 {
6881 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
6882 	char *p = name;
6883 
6884 	if (!name)
6885 		return ERR_PTR(-ENOMEM);
6886 
6887 	*p++ = ':';
6888 	/*
6889 	 * First flags affecting slabcache operations. We will only
6890 	 * get here for aliasable slabs so we do not need to support
6891 	 * too many flags. The flags here must cover all flags that
6892 	 * are matched during merging to guarantee that the id is
6893 	 * unique.
6894 	 */
6895 	if (s->flags & SLAB_CACHE_DMA)
6896 		*p++ = 'd';
6897 	if (s->flags & SLAB_CACHE_DMA32)
6898 		*p++ = 'D';
6899 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
6900 		*p++ = 'a';
6901 	if (s->flags & SLAB_CONSISTENCY_CHECKS)
6902 		*p++ = 'F';
6903 	if (s->flags & SLAB_ACCOUNT)
6904 		*p++ = 'A';
6905 	if (p != name + 1)
6906 		*p++ = '-';
6907 	p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
6908 
6909 	if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
6910 		kfree(name);
6911 		return ERR_PTR(-EINVAL);
6912 	}
6913 	kmsan_unpoison_memory(name, p - name);
6914 	return name;
6915 }
6916 
6917 static int sysfs_slab_add(struct kmem_cache *s)
6918 {
6919 	int err;
6920 	const char *name;
6921 	struct kset *kset = cache_kset(s);
6922 	int unmergeable = slab_unmergeable(s);
6923 
6924 	if (!unmergeable && disable_higher_order_debug &&
6925 			(slub_debug & DEBUG_METADATA_FLAGS))
6926 		unmergeable = 1;
6927 
6928 	if (unmergeable) {
6929 		/*
6930 		 * Slabcache can never be merged so we can use the name proper.
6931 		 * This is typically the case for debug situations. In that
6932 		 * case we can catch duplicate names easily.
6933 		 */
6934 		sysfs_remove_link(&slab_kset->kobj, s->name);
6935 		name = s->name;
6936 	} else {
6937 		/*
6938 		 * Create a unique name for the slab as a target
6939 		 * for the symlinks.
6940 		 */
6941 		name = create_unique_id(s);
6942 		if (IS_ERR(name))
6943 			return PTR_ERR(name);
6944 	}
6945 
6946 	s->kobj.kset = kset;
6947 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
6948 	if (err)
6949 		goto out;
6950 
6951 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
6952 	if (err)
6953 		goto out_del_kobj;
6954 
6955 	if (!unmergeable) {
6956 		/* Setup first alias */
6957 		sysfs_slab_alias(s, s->name);
6958 	}
6959 out:
6960 	if (!unmergeable)
6961 		kfree(name);
6962 	return err;
6963 out_del_kobj:
6964 	kobject_del(&s->kobj);
6965 	goto out;
6966 }
6967 
6968 void sysfs_slab_unlink(struct kmem_cache *s)
6969 {
6970 	kobject_del(&s->kobj);
6971 }
6972 
6973 void sysfs_slab_release(struct kmem_cache *s)
6974 {
6975 	kobject_put(&s->kobj);
6976 }
6977 
6978 /*
6979  * Need to buffer aliases during bootup until sysfs becomes
6980  * available lest we lose that information.
6981  */
6982 struct saved_alias {
6983 	struct kmem_cache *s;
6984 	const char *name;
6985 	struct saved_alias *next;
6986 };
6987 
6988 static struct saved_alias *alias_list;
6989 
6990 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
6991 {
6992 	struct saved_alias *al;
6993 
6994 	if (slab_state == FULL) {
6995 		/*
6996 		 * If we have a leftover link then remove it.
6997 		 */
6998 		sysfs_remove_link(&slab_kset->kobj, name);
6999 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
7000 	}
7001 
7002 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
7003 	if (!al)
7004 		return -ENOMEM;
7005 
7006 	al->s = s;
7007 	al->name = name;
7008 	al->next = alias_list;
7009 	alias_list = al;
7010 	kmsan_unpoison_memory(al, sizeof(*al));
7011 	return 0;
7012 }
7013 
7014 static int __init slab_sysfs_init(void)
7015 {
7016 	struct kmem_cache *s;
7017 	int err;
7018 
7019 	mutex_lock(&slab_mutex);
7020 
7021 	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
7022 	if (!slab_kset) {
7023 		mutex_unlock(&slab_mutex);
7024 		pr_err("Cannot register slab subsystem.\n");
7025 		return -ENOMEM;
7026 	}
7027 
7028 	slab_state = FULL;
7029 
7030 	list_for_each_entry(s, &slab_caches, list) {
7031 		err = sysfs_slab_add(s);
7032 		if (err)
7033 			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
7034 			       s->name);
7035 	}
7036 
7037 	while (alias_list) {
7038 		struct saved_alias *al = alias_list;
7039 
7040 		alias_list = alias_list->next;
7041 		err = sysfs_slab_alias(al->s, al->name);
7042 		if (err)
7043 			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
7044 			       al->name);
7045 		kfree(al);
7046 	}
7047 
7048 	mutex_unlock(&slab_mutex);
7049 	return 0;
7050 }
7051 late_initcall(slab_sysfs_init);
7052 #endif /* SLAB_SUPPORTS_SYSFS */
7053 
7054 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
7055 static int slab_debugfs_show(struct seq_file *seq, void *v)
7056 {
7057 	struct loc_track *t = seq->private;
7058 	struct location *l;
7059 	unsigned long idx;
7060 
7061 	idx = (unsigned long) t->idx;
7062 	if (idx < t->count) {
7063 		l = &t->loc[idx];
7064 
7065 		seq_printf(seq, "%7ld ", l->count);
7066 
7067 		if (l->addr)
7068 			seq_printf(seq, "%pS", (void *)l->addr);
7069 		else
7070 			seq_puts(seq, "<not-available>");
7071 
7072 		if (l->waste)
7073 			seq_printf(seq, " waste=%lu/%lu",
7074 				l->count * l->waste, l->waste);
7075 
7076 		if (l->sum_time != l->min_time) {
7077 			seq_printf(seq, " age=%ld/%llu/%ld",
7078 				l->min_time, div_u64(l->sum_time, l->count),
7079 				l->max_time);
7080 		} else
7081 			seq_printf(seq, " age=%ld", l->min_time);
7082 
7083 		if (l->min_pid != l->max_pid)
7084 			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
7085 		else
7086 			seq_printf(seq, " pid=%ld",
7087 				l->min_pid);
7088 
7089 		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
7090 			seq_printf(seq, " cpus=%*pbl",
7091 				 cpumask_pr_args(to_cpumask(l->cpus)));
7092 
7093 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
7094 			seq_printf(seq, " nodes=%*pbl",
7095 				 nodemask_pr_args(&l->nodes));
7096 
7097 #ifdef CONFIG_STACKDEPOT
7098 		{
7099 			depot_stack_handle_t handle;
7100 			unsigned long *entries;
7101 			unsigned int nr_entries, j;
7102 
7103 			handle = READ_ONCE(l->handle);
7104 			if (handle) {
7105 				nr_entries = stack_depot_fetch(handle, &entries);
7106 				seq_puts(seq, "\n");
7107 				for (j = 0; j < nr_entries; j++)
7108 					seq_printf(seq, "        %pS\n", (void *)entries[j]);
7109 			}
7110 		}
7111 #endif
7112 		seq_puts(seq, "\n");
7113 	}
7114 
7115 	if (!idx && !t->count)
7116 		seq_puts(seq, "No data\n");
7117 
7118 	return 0;
7119 }
7120 
7121 static void slab_debugfs_stop(struct seq_file *seq, void *v)
7122 {
7123 }
7124 
7125 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
7126 {
7127 	struct loc_track *t = seq->private;
7128 
7129 	t->idx = ++(*ppos);
7130 	if (*ppos <= t->count)
7131 		return ppos;
7132 
7133 	return NULL;
7134 }
7135 
7136 static int cmp_loc_by_count(const void *a, const void *b, const void *data)
7137 {
7138 	struct location *loc1 = (struct location *)a;
7139 	struct location *loc2 = (struct location *)b;
7140 
7141 	if (loc1->count > loc2->count)
7142 		return -1;
7143 	else
7144 		return 1;
7145 }
7146 
7147 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
7148 {
7149 	struct loc_track *t = seq->private;
7150 
7151 	t->idx = *ppos;
7152 	return ppos;
7153 }
7154 
7155 static const struct seq_operations slab_debugfs_sops = {
7156 	.start  = slab_debugfs_start,
7157 	.next   = slab_debugfs_next,
7158 	.stop   = slab_debugfs_stop,
7159 	.show   = slab_debugfs_show,
7160 };
7161 
7162 static int slab_debug_trace_open(struct inode *inode, struct file *filep)
7163 {
7164 
7165 	struct kmem_cache_node *n;
7166 	enum track_item alloc;
7167 	int node;
7168 	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
7169 						sizeof(struct loc_track));
7170 	struct kmem_cache *s = file_inode(filep)->i_private;
7171 	unsigned long *obj_map;
7172 
7173 	if (!t)
7174 		return -ENOMEM;
7175 
7176 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
7177 	if (!obj_map) {
7178 		seq_release_private(inode, filep);
7179 		return -ENOMEM;
7180 	}
7181 
7182 	if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
7183 		alloc = TRACK_ALLOC;
7184 	else
7185 		alloc = TRACK_FREE;
7186 
7187 	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
7188 		bitmap_free(obj_map);
7189 		seq_release_private(inode, filep);
7190 		return -ENOMEM;
7191 	}
7192 
7193 	for_each_kmem_cache_node(s, node, n) {
7194 		unsigned long flags;
7195 		struct slab *slab;
7196 
7197 		if (!node_nr_slabs(n))
7198 			continue;
7199 
7200 		spin_lock_irqsave(&n->list_lock, flags);
7201 		list_for_each_entry(slab, &n->partial, slab_list)
7202 			process_slab(t, s, slab, alloc, obj_map);
7203 		list_for_each_entry(slab, &n->full, slab_list)
7204 			process_slab(t, s, slab, alloc, obj_map);
7205 		spin_unlock_irqrestore(&n->list_lock, flags);
7206 	}
7207 
7208 	/* Sort locations by count */
7209 	sort_r(t->loc, t->count, sizeof(struct location),
7210 		cmp_loc_by_count, NULL, NULL);
7211 
7212 	bitmap_free(obj_map);
7213 	return 0;
7214 }
7215 
7216 static int slab_debug_trace_release(struct inode *inode, struct file *file)
7217 {
7218 	struct seq_file *seq = file->private_data;
7219 	struct loc_track *t = seq->private;
7220 
7221 	free_loc_track(t);
7222 	return seq_release_private(inode, file);
7223 }
7224 
7225 static const struct file_operations slab_debugfs_fops = {
7226 	.open    = slab_debug_trace_open,
7227 	.read    = seq_read,
7228 	.llseek  = seq_lseek,
7229 	.release = slab_debug_trace_release,
7230 };
7231 
7232 static void debugfs_slab_add(struct kmem_cache *s)
7233 {
7234 	struct dentry *slab_cache_dir;
7235 
7236 	if (unlikely(!slab_debugfs_root))
7237 		return;
7238 
7239 	slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
7240 
7241 	debugfs_create_file("alloc_traces", 0400,
7242 		slab_cache_dir, s, &slab_debugfs_fops);
7243 
7244 	debugfs_create_file("free_traces", 0400,
7245 		slab_cache_dir, s, &slab_debugfs_fops);
7246 }
7247 
7248 void debugfs_slab_release(struct kmem_cache *s)
7249 {
7250 	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
7251 }
7252 
7253 static int __init slab_debugfs_init(void)
7254 {
7255 	struct kmem_cache *s;
7256 
7257 	slab_debugfs_root = debugfs_create_dir("slab", NULL);
7258 
7259 	list_for_each_entry(s, &slab_caches, list)
7260 		if (s->flags & SLAB_STORE_USER)
7261 			debugfs_slab_add(s);
7262 
7263 	return 0;
7264 
7265 }
7266 __initcall(slab_debugfs_init);
7267 #endif
7268 /*
7269  * The /proc/slabinfo ABI
7270  */
7271 #ifdef CONFIG_SLUB_DEBUG
7272 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
7273 {
7274 	unsigned long nr_slabs = 0;
7275 	unsigned long nr_objs = 0;
7276 	unsigned long nr_free = 0;
7277 	int node;
7278 	struct kmem_cache_node *n;
7279 
7280 	for_each_kmem_cache_node(s, node, n) {
7281 		nr_slabs += node_nr_slabs(n);
7282 		nr_objs += node_nr_objs(n);
7283 		nr_free += count_partial_free_approx(n);
7284 	}
7285 
7286 	sinfo->active_objs = nr_objs - nr_free;
7287 	sinfo->num_objs = nr_objs;
7288 	sinfo->active_slabs = nr_slabs;
7289 	sinfo->num_slabs = nr_slabs;
7290 	sinfo->objects_per_slab = oo_objects(s->oo);
7291 	sinfo->cache_order = oo_order(s->oo);
7292 }
7293 #endif /* CONFIG_SLUB_DEBUG */
7294