xref: /linux/mm/slub.c (revision 9a881ea3da166b05ec0eb66df22a7228b90aa66c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SLUB: A slab allocator with low overhead percpu array caches and mostly
4  * lockless freeing of objects to slabs in the slowpath.
5  *
6  * The allocator synchronizes using a local trylock for percpu arrays in the
7  * fastpath, and cmpxchg_double (or bit spinlock) for slowpath freeing.
8  * Uses a centralized lock to manage a pool of partial slabs.
9  *
10  * (C) 2007 SGI, Christoph Lameter
11  * (C) 2011 Linux Foundation, Christoph Lameter
12  * (C) 2025 SUSE, Vlastimil Babka
13  */
14 
15 #include <linux/mm.h>
16 #include <linux/swap.h> /* mm_account_reclaimed_pages() */
17 #include <linux/module.h>
18 #include <linux/bit_spinlock.h>
19 #include <linux/interrupt.h>
20 #include <linux/swab.h>
21 #include <linux/bitops.h>
22 #include <linux/slab.h>
23 #include "slab.h"
24 #include <linux/vmalloc.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kasan.h>
28 #include <linux/node.h>
29 #include <linux/kmsan.h>
30 #include <linux/cpu.h>
31 #include <linux/cpuset.h>
32 #include <linux/mempolicy.h>
33 #include <linux/ctype.h>
34 #include <linux/stackdepot.h>
35 #include <linux/debugobjects.h>
36 #include <linux/kallsyms.h>
37 #include <linux/kfence.h>
38 #include <linux/memory.h>
39 #include <linux/math64.h>
40 #include <linux/fault-inject.h>
41 #include <linux/kmemleak.h>
42 #include <linux/stacktrace.h>
43 #include <linux/prefetch.h>
44 #include <linux/memcontrol.h>
45 #include <linux/random.h>
46 #include <linux/prandom.h>
47 #include <kunit/test.h>
48 #include <kunit/test-bug.h>
49 #include <linux/sort.h>
50 #include <linux/irq_work.h>
51 #include <linux/kprobes.h>
52 #include <linux/debugfs.h>
53 #include <trace/events/kmem.h>
54 
55 #include "internal.h"
56 
57 /*
58  * Lock order:
59  *   0.  cpu_hotplug_lock
60  *   1.  slab_mutex (Global Mutex)
61  *   2a. kmem_cache->cpu_sheaves->lock (Local trylock)
62  *   2b. node->barn->lock (Spinlock)
63  *   2c. node->list_lock (Spinlock)
64  *   3.  slab_lock(slab) (Only on some arches)
65  *   4.  object_map_lock (Only for debugging)
66  *
67  *   slab_mutex
68  *
69  *   The role of the slab_mutex is to protect the list of all the slabs
70  *   and to synchronize major metadata changes to slab cache structures.
71  *   Also synchronizes memory hotplug callbacks.
72  *
73  *   slab_lock
74  *
75  *   The slab_lock is a wrapper around the page lock, thus it is a bit
76  *   spinlock.
77  *
78  *   The slab_lock is only used on arches that do not have the ability
79  *   to do a cmpxchg_double. It only protects:
80  *
81  *	A. slab->freelist	-> List of free objects in a slab
82  *	B. slab->inuse		-> Number of objects in use
83  *	C. slab->objects	-> Number of objects in slab
84  *	D. slab->frozen		-> frozen state
85  *
86  *   SL_partial slabs
87  *
88  *   Slabs on node partial list have at least one free object. A limited number
89  *   of slabs on the list can be fully free (slab->inuse == 0), until we start
90  *   discarding them. These slabs are marked with SL_partial, and the flag is
91  *   cleared while removing them, usually to grab their freelist afterwards.
92  *   This clearing also exempts them from list management. Please see
93  *   __slab_free() for more details.
94  *
95  *   Full slabs
96  *
97  *   For caches without debugging enabled, full slabs (slab->inuse ==
98  *   slab->objects and slab->freelist == NULL) are not placed on any list.
99  *   The __slab_free() freeing the first object from such a slab will place
100  *   it on the partial list. Caches with debugging enabled place such slab
101  *   on the full list and use different allocation and freeing paths.
102  *
103  *   Frozen slabs
104  *
105  *   If a slab is frozen then it is exempt from list management. It is used to
106  *   indicate a slab that has failed consistency checks and thus cannot be
107  *   allocated from anymore - it is also marked as full. Any previously
108  *   allocated objects will be simply leaked upon freeing instead of attempting
109  *   to modify the potentially corrupted freelist and metadata.
110  *
111  *   To sum up, the current scheme is:
112  *   - node partial slab:            SL_partial && !full && !frozen
113  *   - taken off partial list:      !SL_partial && !full && !frozen
114  *   - full slab, not on any list:  !SL_partial &&  full && !frozen
115  *   - frozen due to inconsistency: !SL_partial &&  full &&  frozen
116  *
117  *   node->list_lock (spinlock)
118  *
119  *   The list_lock protects the partial and full list on each node and
120  *   the partial slab counter. If taken then no new slabs may be added or
121  *   removed from the lists, nor may the number of partial slabs be modified.
122  *   (Note that the total number of slabs is an atomic value that may be
123  *   modified without taking the list lock).
124  *
125  *   The list_lock is a centralized lock and thus we avoid taking it as
126  *   much as possible. As long as SLUB does not have to handle partial
127  *   slabs, operations can continue without any centralized lock.
128  *
129  *   For debug caches, all allocations are forced to go through a list_lock
130  *   protected region to serialize against concurrent validation.
131  *
132  *   cpu_sheaves->lock (local_trylock)
133  *
134  *   This lock protects fastpath operations on the percpu sheaves. On !RT it
135  *   only disables preemption and does no atomic operations. As long as the main
136  *   or spare sheaf can handle the allocation or free, there is no other
137  *   overhead.
138  *
139  *   node->barn->lock (spinlock)
140  *
141  *   This lock protects the operations on per-NUMA-node barn. It can quickly
142  *   serve an empty or full sheaf if available, and avoid more expensive refill
143  *   or flush operation.
144  *
145  *   Lockless freeing
146  *
147  *   Objects may have to be freed to their slabs when they are from a remote
148  *   node (where we want to avoid filling local sheaves with remote objects)
149  *   or when there are too many full sheaves. On architectures supporting
150  *   cmpxchg_double this is done by a lockless update of slab's freelist and
151  *   counters, otherwise slab_lock is taken. This only needs to take the
152  *   list_lock if it's a first free to a full slab, or when a slab becomes empty
153  *   after the free.
154  *
155  *   irq, preemption, migration considerations
156  *
157  *   Interrupts are disabled as part of list_lock or barn lock operations, or
158  *   around the slab_lock operation, in order to make the slab allocator safe
159  *   to use in the context of an irq.
160  *   Preemption is disabled as part of local_trylock operations.
161  *   kmalloc_nolock() and kfree_nolock() are safe in NMI context but see
162  *   their limitations.
163  *
164  * SLUB assigns two object arrays called sheaves for caching allocations and
165  * frees on each cpu, with a NUMA node shared barn for balancing between cpus.
166  * Allocations and frees are primarily served from these sheaves.
167  *
168  * Slabs with free elements are kept on a partial list and during regular
169  * operations no list for full slabs is used. If an object in a full slab is
170  * freed then the slab will show up again on the partial lists.
171  * We track full slabs for debugging purposes, though, because otherwise we
172  * cannot scan all objects.
173  *
174  * Slabs are freed when they become empty. Teardown and setup is minimal so we
175  * rely on the page allocator's per-cpu caches for fast frees and allocs.
176  *
177  * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
178  * 			options set. This moves slab handling out of
179  * 			the fast path and disables lockless freelists.
180  */
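/*
 * Illustrative sketch (not part of the kernel build): a minimal userspace
 * model of the sheaf fastpath described above, assuming a sheaf is just a
 * fixed-size array stack and ignoring locking, spare sheaves, barns and
 * NUMA placement. All names here (toy_sheaf, toy_alloc, ...) are
 * hypothetical.
 */
#include <stddef.h>

#define TOY_SHEAF_CAPACITY 32

struct toy_sheaf {
	unsigned int size;			/* number of cached objects */
	void *objects[TOY_SHEAF_CAPACITY];	/* cached object pointers */
};

/* Fastpath allocation: pop a cached object from the main sheaf, if any. */
static void *toy_alloc(struct toy_sheaf *main)
{
	if (main->size > 0)
		return main->objects[--main->size];
	return NULL;	/* would fall back to the slowpath (barn or slabs) */
}

/* Fastpath free: push the object into the main sheaf if it has room. */
static int toy_free(struct toy_sheaf *main, void *object)
{
	if (main->size < TOY_SHEAF_CAPACITY) {
		main->objects[main->size++] = object;
		return 1;
	}
	return 0;	/* would fall back to swapping or flushing sheaves */
}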
181 
182 /**
183  * enum slab_flags - How the slab flags bits are used.
184  * @SL_locked: Is locked with slab_lock()
185  * @SL_partial: On the per-node partial list
186  * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
187  *
188  * The slab flags share space with the page flags but some bits have
189  * different interpretations.  The high bits are used for information
190  * like zone/node/section.
191  */
192 enum slab_flags {
193 	SL_locked = PG_locked,
194 	SL_partial = PG_workingset,	/* Historical reasons for this bit */
195 	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
196 };
197 
198 #ifndef CONFIG_SLUB_TINY
199 #define __fastpath_inline __always_inline
200 #else
201 #define __fastpath_inline
202 #endif
203 
204 #ifdef CONFIG_SLUB_DEBUG
205 #ifdef CONFIG_SLUB_DEBUG_ON
206 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
207 #else
208 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
209 #endif
210 #endif		/* CONFIG_SLUB_DEBUG */
211 
212 #ifdef CONFIG_NUMA
213 static DEFINE_STATIC_KEY_FALSE(strict_numa);
214 #endif
215 
216 /* Structure holding parameters for get_from_partial() call chain */
217 struct partial_context {
218 	gfp_t flags;
219 	unsigned int orig_size;
220 };
221 
222 /* Structure holding parameters for get_partial_node_bulk() */
223 struct partial_bulk_context {
224 	gfp_t flags;
225 	unsigned int min_objects;
226 	unsigned int max_objects;
227 	struct list_head slabs;
228 };
229 
230 static inline bool kmem_cache_debug(struct kmem_cache *s)
231 {
232 	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
233 }
234 
235 void *fixup_red_left(struct kmem_cache *s, void *p)
236 {
237 	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
238 		p += s->red_left_pad;
239 
240 	return p;
241 }
242 
243 /*
244  * Issues still to be resolved:
245  *
246  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
247  *
248  * - Variable sizing of the per node arrays
249  */
250 
251 /* Enable to log cmpxchg failures */
252 #undef SLUB_DEBUG_CMPXCHG
253 
254 #ifndef CONFIG_SLUB_TINY
255 /*
256  * Minimum number of partial slabs. These will be left on the partial
257  * lists even if they are empty. kmem_cache_shrink may reclaim them.
258  */
259 #define MIN_PARTIAL 5
260 
261 /*
262  * Maximum number of desirable partial slabs.
263  * The existence of more partial slabs makes kmem_cache_shrink
264  * sort the partial list by the number of objects in use.
265  */
266 #define MAX_PARTIAL 10
267 #else
268 #define MIN_PARTIAL 0
269 #define MAX_PARTIAL 0
270 #endif
271 
272 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
273 				SLAB_POISON | SLAB_STORE_USER)
274 
275 /*
276  * These debug flags cannot use CMPXCHG because there might be consistency
277  * issues when checking or reading debug information
278  */
279 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
280 				SLAB_TRACE)
281 
282 
283 /*
284  * Debugging flags that require metadata to be stored in the slab.  These get
285  * disabled when slab_debug=O is used and a cache's min order increases with
286  * metadata.
287  */
288 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
289 
290 #define OO_SHIFT	16
291 #define OO_MASK		((1 << OO_SHIFT) - 1)
292 #define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */
293 
294 /* Internal SLUB flags */
295 /* Poison object */
296 #define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
297 /* Use cmpxchg_double */
298 
299 #ifdef system_has_freelist_aba
300 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
301 #else
302 #define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
303 #endif
304 
305 /*
306  * Tracking user of a slab.
307  */
308 #define TRACK_ADDRS_COUNT 16
309 struct track {
310 	unsigned long addr;	/* Called from address */
311 #ifdef CONFIG_STACKDEPOT
312 	depot_stack_handle_t handle;
313 #endif
314 	int cpu;		/* Was running on cpu */
315 	int pid;		/* Pid context */
316 	unsigned long when;	/* When did the operation occur */
317 };
318 
319 enum track_item { TRACK_ALLOC, TRACK_FREE };
320 
321 #ifdef SLAB_SUPPORTS_SYSFS
322 static int sysfs_slab_add(struct kmem_cache *);
323 #else
324 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
325 #endif
326 
327 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
328 static void debugfs_slab_add(struct kmem_cache *);
329 #else
330 static inline void debugfs_slab_add(struct kmem_cache *s) { }
331 #endif
332 
333 enum add_mode {
334 	ADD_TO_HEAD,
335 	ADD_TO_TAIL,
336 };
337 
338 enum stat_item {
339 	ALLOC_FASTPATH,		/* Allocation from percpu sheaves */
340 	ALLOC_SLOWPATH,		/* Allocation from partial or new slab */
341 	FREE_RCU_SHEAF,		/* Free to rcu_free sheaf */
342 	FREE_RCU_SHEAF_FAIL,	/* Failed to free to a rcu_free sheaf */
343 	FREE_FASTPATH,		/* Free to percpu sheaves */
344 	FREE_SLOWPATH,		/* Free to a slab */
345 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
346 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
347 	ALLOC_SLAB,		/* New slab acquired from page allocator */
348 	ALLOC_NODE_MISMATCH,	/* Requested node different from cpu sheaf */
349 	FREE_SLAB,		/* Slab freed to the page allocator */
350 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
351 	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
352 	SHEAF_FLUSH,		/* Objects flushed from a sheaf */
353 	SHEAF_REFILL,		/* Objects refilled to a sheaf */
354 	SHEAF_ALLOC,		/* Allocation of an empty sheaf */
355 	SHEAF_FREE,		/* Freeing of an empty sheaf */
356 	BARN_GET,		/* Got full sheaf from barn */
357 	BARN_GET_FAIL,		/* Failed to get full sheaf from barn */
358 	BARN_PUT,		/* Put full sheaf to barn */
359 	BARN_PUT_FAIL,		/* Failed to put full sheaf to barn */
360 	SHEAF_PREFILL_FAST,	/* Sheaf prefill grabbed the spare sheaf */
361 	SHEAF_PREFILL_SLOW,	/* Sheaf prefill found no spare sheaf */
362 	SHEAF_PREFILL_OVERSIZE,	/* Allocation of oversize sheaf for prefill */
363 	SHEAF_RETURN_FAST,	/* Sheaf return reattached spare sheaf */
364 	SHEAF_RETURN_SLOW,	/* Sheaf return could not reattach spare */
365 	NR_SLUB_STAT_ITEMS
366 };
367 
368 #ifdef CONFIG_SLUB_STATS
369 struct kmem_cache_stats {
370 	unsigned int stat[NR_SLUB_STAT_ITEMS];
371 };
372 #endif
373 
374 static inline void stat(const struct kmem_cache *s, enum stat_item si)
375 {
376 #ifdef CONFIG_SLUB_STATS
377 	/*
378 	 * The rmw is racy on a preemptible kernel but this is acceptable, so
379 	 * avoid this_cpu_add()'s irq-disable overhead.
380 	 */
381 	raw_cpu_inc(s->cpu_stats->stat[si]);
382 #endif
383 }
384 
385 static inline
386 void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
387 {
388 #ifdef CONFIG_SLUB_STATS
389 	raw_cpu_add(s->cpu_stats->stat[si], v);
390 #endif
391 }
392 
393 #define MAX_FULL_SHEAVES	10
394 #define MAX_EMPTY_SHEAVES	10
395 
396 struct node_barn {
397 	spinlock_t lock;
398 	struct list_head sheaves_full;
399 	struct list_head sheaves_empty;
400 	unsigned int nr_full;
401 	unsigned int nr_empty;
402 };
403 
404 struct slab_sheaf {
405 	union {
406 		struct rcu_head rcu_head;
407 		struct list_head barn_list;
408 		/* only used for prefilled sheaves */
409 		struct {
410 			unsigned int capacity;
411 			bool pfmemalloc;
412 		};
413 	};
414 	struct kmem_cache *cache;
415 	unsigned int size;
416 	int node; /* only used for rcu_sheaf */
417 	void *objects[];
418 };
419 
420 struct slub_percpu_sheaves {
421 	local_trylock_t lock;
422 	struct slab_sheaf *main; /* never NULL when unlocked */
423 	struct slab_sheaf *spare; /* empty or full, may be NULL */
424 	struct slab_sheaf *rcu_free; /* for batching kfree_rcu() */
425 };
426 
427 /*
428  * The slab lists for all objects.
429  */
430 struct kmem_cache_node {
431 	spinlock_t list_lock;
432 	unsigned long nr_partial;
433 	struct list_head partial;
434 #ifdef CONFIG_SLUB_DEBUG
435 	atomic_long_t nr_slabs;
436 	atomic_long_t total_objects;
437 	struct list_head full;
438 #endif
439 	struct node_barn *barn;
440 };
441 
442 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
443 {
444 	return s->node[node];
445 }
446 
447 /*
448  * Get the barn of the current cpu's closest memory node. It may not exist on
449  * systems that have memoryless nodes but lack CONFIG_HAVE_MEMORYLESS_NODES.
450  */
451 static inline struct node_barn *get_barn(struct kmem_cache *s)
452 {
453 	struct kmem_cache_node *n = get_node(s, numa_mem_id());
454 
455 	if (!n)
456 		return NULL;
457 
458 	return n->barn;
459 }
460 
461 /*
462  * Iterator over all nodes. The body will be executed for each node that has
463  * a kmem_cache_node structure allocated (which is true for all online nodes)
464  */
465 #define for_each_kmem_cache_node(__s, __node, __n) \
466 	for (__node = 0; __node < nr_node_ids; __node++) \
467 		 if ((__n = get_node(__s, __node)))
468 
469 /*
470  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
471  * Corresponds to node_state[N_MEMORY], but can temporarily
472  * differ during memory hotplug/hotremove operations.
473  * Protected by slab_mutex.
474  */
475 static nodemask_t slab_nodes;
476 
477 /*
478  * Workqueue used for flushing cpu and kfree_rcu sheaves.
479  */
480 static struct workqueue_struct *flushwq;
481 
482 struct slub_flush_work {
483 	struct work_struct work;
484 	struct kmem_cache *s;
485 	bool skip;
486 };
487 
488 static DEFINE_MUTEX(flush_lock);
489 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
490 
491 /********************************************************************
492  * 			Core slab cache functions
493  *******************************************************************/
494 
495 /*
496  * Returns freelist pointer (ptr). With hardening, this is obfuscated
497  * with an XOR of the address where the pointer is held and a per-cache
498  * random number.
499  */
500 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
501 					    void *ptr, unsigned long ptr_addr)
502 {
503 	unsigned long encoded;
504 
505 #ifdef CONFIG_SLAB_FREELIST_HARDENED
506 	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
507 #else
508 	encoded = (unsigned long)ptr;
509 #endif
510 	return (freeptr_t){.v = encoded};
511 }
512 
513 static inline void *freelist_ptr_decode(const struct kmem_cache *s,
514 					freeptr_t ptr, unsigned long ptr_addr)
515 {
516 	void *decoded;
517 
518 #ifdef CONFIG_SLAB_FREELIST_HARDENED
519 	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
520 #else
521 	decoded = (void *)ptr.v;
522 #endif
523 	return decoded;
524 }
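/*
 * Illustrative sketch (userspace, not kernel code): the hardened encoding
 * above is a plain XOR with a per-cache random value and the byte-swapped
 * address where the pointer is stored, so applying the same operation again
 * decodes it. This sketch assumes a 64-bit build and uses
 * __builtin_bswap64() in place of the kernel's swab().
 */
#include <stdint.h>
#include <assert.h>

static uint64_t toy_obfuscate(uint64_t ptr, uint64_t cache_random, uint64_t ptr_addr)
{
	return ptr ^ cache_random ^ __builtin_bswap64(ptr_addr);
}

static void toy_freelist_ptr_roundtrip(void)
{
	uint64_t ptr    = 0xffff888012345678ULL;	/* example object address */
	uint64_t random = 0x1234abcd5678ef90ULL;	/* per-cache secret */
	uint64_t addr   = 0xffff888012345690ULL;	/* where the pointer is stored */
	uint64_t enc    = toy_obfuscate(ptr, random, addr);

	/* XOR is its own inverse, so decoding recovers the original pointer. */
	assert(toy_obfuscate(enc, random, addr) == ptr);
}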
525 
526 static inline void *get_freepointer(struct kmem_cache *s, void *object)
527 {
528 	unsigned long ptr_addr;
529 	freeptr_t p;
530 
531 	object = kasan_reset_tag(object);
532 	ptr_addr = (unsigned long)object + s->offset;
533 	p = *(freeptr_t *)(ptr_addr);
534 	return freelist_ptr_decode(s, p, ptr_addr);
535 }
536 
537 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
538 {
539 	unsigned long freeptr_addr = (unsigned long)object + s->offset;
540 
541 #ifdef CONFIG_SLAB_FREELIST_HARDENED
542 	BUG_ON(object == fp); /* naive detection of double free or corruption */
543 #endif
544 
545 	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
546 	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
547 }
548 
549 /*
550  * See comment in calculate_sizes().
551  */
552 static inline bool freeptr_outside_object(struct kmem_cache *s)
553 {
554 	return s->offset >= s->inuse;
555 }
556 
557 /*
558  * Return the offset of the end of the info block, which is inuse plus the
559  * size of the free pointer when the free pointer does not overlap the object.
560  */
561 static inline unsigned int get_info_end(struct kmem_cache *s)
562 {
563 	if (freeptr_outside_object(s))
564 		return s->inuse + sizeof(void *);
565 	else
566 		return s->inuse;
567 }
568 
569 /* Loop over all objects in a slab */
570 #define for_each_object(__p, __s, __addr, __objects) \
571 	for (__p = fixup_red_left(__s, __addr); \
572 		__p < (__addr) + (__objects) * (__s)->size; \
573 		__p += (__s)->size)
574 
575 static inline unsigned int order_objects(unsigned int order, unsigned int size)
576 {
577 	return ((unsigned int)PAGE_SIZE << order) / size;
578 }
579 
580 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
581 		unsigned int size)
582 {
583 	struct kmem_cache_order_objects x = {
584 		(order << OO_SHIFT) + order_objects(order, size)
585 	};
586 
587 	return x;
588 }
589 
590 static inline unsigned int oo_order(struct kmem_cache_order_objects x)
591 {
592 	return x.x >> OO_SHIFT;
593 }
594 
595 static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
596 {
597 	return x.x & OO_MASK;
598 }
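/*
 * Illustrative sketch (userspace): how oo_make()/oo_order()/oo_objects()
 * pack an allocation order and object count into a single word. The page
 * size and object size below are assumptions for the example; the constants
 * mirror OO_SHIFT/OO_MASK above.
 */
#include <assert.h>

#define TOY_PAGE_SIZE	4096u
#define TOY_OO_SHIFT	16
#define TOY_OO_MASK	((1u << TOY_OO_SHIFT) - 1)

static void toy_oo_example(void)
{
	unsigned int order   = 3;				/* 8 pages per slab */
	unsigned int size    = 256;				/* object size in bytes */
	unsigned int objects = (TOY_PAGE_SIZE << order) / size;	/* 128 objects */
	unsigned int packed  = (order << TOY_OO_SHIFT) + objects;

	assert((packed >> TOY_OO_SHIFT) == order);	/* what oo_order() returns */
	assert((packed & TOY_OO_MASK) == objects);	/* what oo_objects() returns */
}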
599 
600 /*
601  * If network-based swap is enabled, slub must keep track of whether memory
602  * was allocated from pfmemalloc reserves.
603  */
604 static inline bool slab_test_pfmemalloc(const struct slab *slab)
605 {
606 	return test_bit(SL_pfmemalloc, &slab->flags.f);
607 }
608 
609 static inline void slab_set_pfmemalloc(struct slab *slab)
610 {
611 	set_bit(SL_pfmemalloc, &slab->flags.f);
612 }
613 
614 static inline void __slab_clear_pfmemalloc(struct slab *slab)
615 {
616 	__clear_bit(SL_pfmemalloc, &slab->flags.f);
617 }
618 
619 /*
620  * Per slab locking using the pagelock
621  */
622 static __always_inline void slab_lock(struct slab *slab)
623 {
624 	bit_spin_lock(SL_locked, &slab->flags.f);
625 }
626 
627 static __always_inline void slab_unlock(struct slab *slab)
628 {
629 	bit_spin_unlock(SL_locked, &slab->flags.f);
630 }
631 
632 static inline bool
633 __update_freelist_fast(struct slab *slab, struct freelist_counters *old,
634 		       struct freelist_counters *new)
635 {
636 #ifdef system_has_freelist_aba
637 	return try_cmpxchg_freelist(&slab->freelist_counters,
638 				    &old->freelist_counters,
639 				    new->freelist_counters);
640 #else
641 	return false;
642 #endif
643 }
644 
645 static inline bool
646 __update_freelist_slow(struct slab *slab, struct freelist_counters *old,
647 		       struct freelist_counters *new)
648 {
649 	bool ret = false;
650 
651 	slab_lock(slab);
652 	if (slab->freelist == old->freelist &&
653 	    slab->counters == old->counters) {
654 		slab->freelist = new->freelist;
655 		/* prevent tearing for the read in get_partial_node_bulk() */
656 		WRITE_ONCE(slab->counters, new->counters);
657 		ret = true;
658 	}
659 	slab_unlock(slab);
660 
661 	return ret;
662 }
663 
664 /*
665  * Interrupts must be disabled (for the fallback code to work right), typically
666  * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
667  * part of bit_spin_lock(), is sufficient because the policy is not to allow any
668  * allocation/free operation in hardirq context. Therefore nothing can
669  * interrupt the operation.
670  */
671 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
672 		struct freelist_counters *old, struct freelist_counters *new, const char *n)
673 {
674 	bool ret;
675 
676 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
677 		lockdep_assert_irqs_disabled();
678 
679 	if (s->flags & __CMPXCHG_DOUBLE)
680 		ret = __update_freelist_fast(slab, old, new);
681 	else
682 		ret = __update_freelist_slow(slab, old, new);
683 
684 	if (likely(ret))
685 		return true;
686 
687 	cpu_relax();
688 	stat(s, CMPXCHG_DOUBLE_FAIL);
689 
690 #ifdef SLUB_DEBUG_CMPXCHG
691 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
692 #endif
693 
694 	return false;
695 }
696 
697 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
698 		struct freelist_counters *old, struct freelist_counters *new, const char *n)
699 {
700 	bool ret;
701 
702 	if (s->flags & __CMPXCHG_DOUBLE) {
703 		ret = __update_freelist_fast(slab, old, new);
704 	} else {
705 		unsigned long flags;
706 
707 		local_irq_save(flags);
708 		ret = __update_freelist_slow(slab, old, new);
709 		local_irq_restore(flags);
710 	}
711 	if (likely(ret))
712 		return true;
713 
714 	cpu_relax();
715 	stat(s, CMPXCHG_DOUBLE_FAIL);
716 
717 #ifdef SLUB_DEBUG_CMPXCHG
718 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
719 #endif
720 
721 	return false;
722 }
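/*
 * Illustrative sketch (userspace): callers of slab_update_freelist() above
 * typically snapshot the current freelist/counters, compute new values and
 * retry until the compare-and-exchange succeeds. This sketch shows the same
 * retry pattern on a single 64-bit word standing in for the pair; the names
 * are hypothetical.
 */
#include <stdint.h>
#include <stdatomic.h>

static _Atomic uint64_t toy_freelist_counters;

static void toy_update(uint64_t (*compute_new)(uint64_t old))
{
	uint64_t old, new;

	do {
		old = atomic_load(&toy_freelist_counters);	/* snapshot */
		new = compute_new(old);				/* derive the update */
		/* Retry if another CPU changed the word since the snapshot. */
	} while (!atomic_compare_exchange_weak(&toy_freelist_counters, &old, new));
}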
723 
724 /*
725  * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API
726  * family rounds up the requested size to these fixed ones, so there can be
727  * extra space beyond what was requested. Save the original request size in
728  * the metadata area, for better debugging and sanity checks.
729  */
730 static inline void set_orig_size(struct kmem_cache *s,
731 				void *object, unsigned long orig_size)
732 {
733 	void *p = kasan_reset_tag(object);
734 
735 	if (!slub_debug_orig_size(s))
736 		return;
737 
738 	p += get_info_end(s);
739 	p += sizeof(struct track) * 2;
740 
741 	*(unsigned long *)p = orig_size;
742 }
743 
744 static inline unsigned long get_orig_size(struct kmem_cache *s, void *object)
745 {
746 	void *p = kasan_reset_tag(object);
747 
748 	if (is_kfence_address(object))
749 		return kfence_ksize(object);
750 
751 	if (!slub_debug_orig_size(s))
752 		return s->object_size;
753 
754 	p += get_info_end(s);
755 	p += sizeof(struct track) * 2;
756 
757 	return *(unsigned long *)p;
758 }
759 
760 #ifdef CONFIG_SLAB_OBJ_EXT
761 
762 /*
763  * Check if memory cgroup or memory allocation profiling is enabled.
764  * If enabled, SLUB tries to reduce memory overhead of accounting
765  * slab objects. If neither is enabled when this function is called,
766  * the optimization is simply skipped to avoid affecting caches that do not
767  * need slabobj_ext metadata.
768  *
769  * However, this may disable the optimization for caches created too early,
770  * before memory cgroups or memory allocation profiling are initialized,
771  * even though those subsystems are used later.
772  */
773 static inline bool need_slab_obj_exts(struct kmem_cache *s)
774 {
775 	if (s->flags & SLAB_NO_OBJ_EXT)
776 		return false;
777 
778 	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
779 		return true;
780 
781 	if (mem_alloc_profiling_enabled())
782 		return true;
783 
784 	return false;
785 }
786 
787 static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
788 {
789 	return sizeof(struct slabobj_ext) * slab->objects;
790 }
791 
792 static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
793 						    struct slab *slab)
794 {
795 	unsigned long objext_offset;
796 
797 	objext_offset = s->size * slab->objects;
798 	objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
799 	return objext_offset;
800 }
801 
802 static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
803 						     struct slab *slab)
804 {
805 	unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
806 	unsigned long objext_size = obj_exts_size_in_slab(slab);
807 
808 	return objext_offset + objext_size <= slab_size(slab);
809 }
810 
811 static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
812 {
813 	unsigned long obj_exts;
814 	unsigned long start;
815 	unsigned long end;
816 
817 	obj_exts = slab_obj_exts(slab);
818 	if (!obj_exts)
819 		return false;
820 
821 	start = (unsigned long)slab_address(slab);
822 	end = start + slab_size(slab);
823 	return (obj_exts >= start) && (obj_exts < end);
824 }
825 #else
826 static inline bool need_slab_obj_exts(struct kmem_cache *s)
827 {
828 	return false;
829 }
830 
831 static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
832 {
833 	return 0;
834 }
835 
836 static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
837 						    struct slab *slab)
838 {
839 	return 0;
840 }
841 
842 static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
843 						     struct slab *slab)
844 {
845 	return false;
846 }
847 
848 static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
849 {
850 	return false;
851 }
852 
853 #endif
854 
855 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
856 static bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
857 {
858 	/*
859 	 * Note we cannot rely on the SLAB_OBJ_EXT_IN_OBJ flag here and need to
860 	 * check the stride. A cache can have SLAB_OBJ_EXT_IN_OBJ set, but
861 	 * placing the obj_exts vector in the slab's leftover space is preferred,
862 	 * and whether that is possible depends on the particular slab's size.
863 	 */
864 	return obj_exts_in_slab(s, slab) &&
865 	       (slab_get_stride(slab) == s->size);
866 }
867 
868 static unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
869 {
870 	unsigned int offset = get_info_end(s);
871 
872 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
873 		offset += sizeof(struct track) * 2;
874 
875 	if (slub_debug_orig_size(s))
876 		offset += sizeof(unsigned long);
877 
878 	offset += kasan_metadata_size(s, false);
879 
880 	return offset;
881 }
882 #else
883 static inline bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
884 {
885 	return false;
886 }
887 
888 static inline unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
889 {
890 	return 0;
891 }
892 #endif
893 
894 #ifdef CONFIG_SLUB_DEBUG
895 
896 /*
897  * For debugging contexts where we want to check whether the struct slab pointer
898  * appears to be valid.
899  */
900 static inline bool validate_slab_ptr(struct slab *slab)
901 {
902 	return PageSlab(slab_page(slab));
903 }
904 
905 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
906 static DEFINE_SPINLOCK(object_map_lock);
907 
908 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
909 		       struct slab *slab)
910 {
911 	void *addr = slab_address(slab);
912 	void *p;
913 
914 	bitmap_zero(obj_map, slab->objects);
915 
916 	for (p = slab->freelist; p; p = get_freepointer(s, p))
917 		set_bit(__obj_to_index(s, addr, p), obj_map);
918 }
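/*
 * Illustrative sketch (userspace): __fill_map() above marks every object
 * reachable from the slab's freelist; bits left clear then identify
 * allocated objects. The same idea with array indices standing in for
 * object pointers (all names hypothetical):
 */
#include <stdbool.h>

#define TOY_OBJECTS 8

/*
 * freelist[i] is the index of the next free object, or -1 at the end of the
 * chain; head is the index of the first free object, or -1 if none.
 */
static void toy_fill_map(bool free_map[TOY_OBJECTS],
			 const int freelist[TOY_OBJECTS], int head)
{
	for (int i = 0; i < TOY_OBJECTS; i++)
		free_map[i] = false;

	/* Walk the chain of free objects and mark each one. */
	for (int i = head; i != -1; i = freelist[i])
		free_map[i] = true;

	/* Entries still false correspond to allocated objects. */
}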
919 
920 #if IS_ENABLED(CONFIG_KUNIT)
921 static bool slab_add_kunit_errors(void)
922 {
923 	struct kunit_resource *resource;
924 
925 	if (!kunit_get_current_test())
926 		return false;
927 
928 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
929 	if (!resource)
930 		return false;
931 
932 	(*(int *)resource->data)++;
933 	kunit_put_resource(resource);
934 	return true;
935 }
936 
937 bool slab_in_kunit_test(void)
938 {
939 	struct kunit_resource *resource;
940 
941 	if (!kunit_get_current_test())
942 		return false;
943 
944 	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
945 	if (!resource)
946 		return false;
947 
948 	kunit_put_resource(resource);
949 	return true;
950 }
951 #else
952 static inline bool slab_add_kunit_errors(void) { return false; }
953 #endif
954 
955 static inline unsigned int size_from_object(struct kmem_cache *s)
956 {
957 	if (s->flags & SLAB_RED_ZONE)
958 		return s->size - s->red_left_pad;
959 
960 	return s->size;
961 }
962 
963 static inline void *restore_red_left(struct kmem_cache *s, void *p)
964 {
965 	if (s->flags & SLAB_RED_ZONE)
966 		p -= s->red_left_pad;
967 
968 	return p;
969 }
970 
971 /*
972  * Debug settings:
973  */
974 #if defined(CONFIG_SLUB_DEBUG_ON)
975 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
976 #else
977 static slab_flags_t slub_debug;
978 #endif
979 
980 static const char *slub_debug_string __ro_after_init;
981 static int disable_higher_order_debug;
982 
983 /*
984  * Object debugging
985  */
986 
987 /* Verify that a pointer has an address that is valid within a slab page */
988 static inline int check_valid_pointer(struct kmem_cache *s,
989 				struct slab *slab, void *object)
990 {
991 	void *base;
992 
993 	if (!object)
994 		return 1;
995 
996 	base = slab_address(slab);
997 	object = kasan_reset_tag(object);
998 	object = restore_red_left(s, object);
999 	if (object < base || object >= base + slab->objects * s->size ||
1000 		(object - base) % s->size) {
1001 		return 0;
1002 	}
1003 
1004 	return 1;
1005 }
1006 
1007 static void print_section(char *level, char *text, u8 *addr,
1008 			  unsigned int length)
1009 {
1010 	metadata_access_enable();
1011 	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
1012 			16, 1, kasan_reset_tag((void *)addr), length, 1);
1013 	metadata_access_disable();
1014 }
1015 
1016 static struct track *get_track(struct kmem_cache *s, void *object,
1017 	enum track_item alloc)
1018 {
1019 	struct track *p;
1020 
1021 	p = object + get_info_end(s);
1022 
1023 	return kasan_reset_tag(p + alloc);
1024 }
1025 
1026 #ifdef CONFIG_STACKDEPOT
1027 static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
1028 {
1029 	depot_stack_handle_t handle;
1030 	unsigned long entries[TRACK_ADDRS_COUNT];
1031 	unsigned int nr_entries;
1032 
1033 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
1034 	handle = stack_depot_save(entries, nr_entries, gfp_flags);
1035 
1036 	return handle;
1037 }
1038 #else
1039 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
1040 {
1041 	return 0;
1042 }
1043 #endif
1044 
1045 static void set_track_update(struct kmem_cache *s, void *object,
1046 			     enum track_item alloc, unsigned long addr,
1047 			     depot_stack_handle_t handle)
1048 {
1049 	struct track *p = get_track(s, object, alloc);
1050 
1051 #ifdef CONFIG_STACKDEPOT
1052 	p->handle = handle;
1053 #endif
1054 	p->addr = addr;
1055 	p->cpu = raw_smp_processor_id();
1056 	p->pid = current->pid;
1057 	p->when = jiffies;
1058 }
1059 
1060 static __always_inline void set_track(struct kmem_cache *s, void *object,
1061 				      enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
1062 {
1063 	depot_stack_handle_t handle = set_track_prepare(gfp_flags);
1064 
1065 	set_track_update(s, object, alloc, addr, handle);
1066 }
1067 
1068 static void init_tracking(struct kmem_cache *s, void *object)
1069 {
1070 	struct track *p;
1071 
1072 	if (!(s->flags & SLAB_STORE_USER))
1073 		return;
1074 
1075 	p = get_track(s, object, TRACK_ALLOC);
1076 	memset(p, 0, 2*sizeof(struct track));
1077 }
1078 
1079 static void print_track(const char *s, struct track *t, unsigned long pr_time)
1080 {
1081 	depot_stack_handle_t handle __maybe_unused;
1082 
1083 	if (!t->addr)
1084 		return;
1085 
1086 	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
1087 	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
1088 #ifdef CONFIG_STACKDEPOT
1089 	handle = READ_ONCE(t->handle);
1090 	if (handle)
1091 		stack_depot_print(handle);
1092 	else
1093 		pr_err("object allocation/free stack trace missing\n");
1094 #endif
1095 }
1096 
1097 void print_tracking(struct kmem_cache *s, void *object)
1098 {
1099 	unsigned long pr_time = jiffies;
1100 	if (!(s->flags & SLAB_STORE_USER))
1101 		return;
1102 
1103 	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
1104 	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
1105 }
1106 
1107 static void print_slab_info(const struct slab *slab)
1108 {
1109 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
1110 	       slab, slab->objects, slab->inuse, slab->freelist,
1111 	       &slab->flags.f);
1112 }
1113 
1114 void skip_orig_size_check(struct kmem_cache *s, const void *object)
1115 {
1116 	set_orig_size(s, (void *)object, s->object_size);
1117 }
1118 
1119 static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
1120 {
1121 	struct va_format vaf;
1122 	va_list args;
1123 
1124 	va_copy(args, argsp);
1125 	vaf.fmt = fmt;
1126 	vaf.va = &args;
1127 	pr_err("=============================================================================\n");
1128 	pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
1129 	pr_err("-----------------------------------------------------------------------------\n\n");
1130 	va_end(args);
1131 }
1132 
1133 static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
1134 {
1135 	va_list args;
1136 
1137 	va_start(args, fmt);
1138 	__slab_bug(s, fmt, args);
1139 	va_end(args);
1140 }
1141 
1142 __printf(2, 3)
1143 static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
1144 {
1145 	struct va_format vaf;
1146 	va_list args;
1147 
1148 	if (slab_add_kunit_errors())
1149 		return;
1150 
1151 	va_start(args, fmt);
1152 	vaf.fmt = fmt;
1153 	vaf.va = &args;
1154 	pr_err("FIX %s: %pV\n", s->name, &vaf);
1155 	va_end(args);
1156 }
1157 
1158 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
1159 {
1160 	unsigned int off;	/* Offset of last byte */
1161 	u8 *addr = slab_address(slab);
1162 
1163 	print_tracking(s, p);
1164 
1165 	print_slab_info(slab);
1166 
1167 	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
1168 	       p, p - addr, get_freepointer(s, p));
1169 
1170 	if (s->flags & SLAB_RED_ZONE)
1171 		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
1172 			      s->red_left_pad);
1173 	else if (p > addr + 16)
1174 		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
1175 
1176 	print_section(KERN_ERR,         "Object   ", p,
1177 		      min_t(unsigned int, s->object_size, PAGE_SIZE));
1178 	if (s->flags & SLAB_RED_ZONE)
1179 		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
1180 			s->inuse - s->object_size);
1181 
1182 	off = get_info_end(s);
1183 
1184 	if (s->flags & SLAB_STORE_USER)
1185 		off += 2 * sizeof(struct track);
1186 
1187 	if (slub_debug_orig_size(s))
1188 		off += sizeof(unsigned long);
1189 
1190 	off += kasan_metadata_size(s, false);
1191 
1192 	if (obj_exts_in_object(s, slab))
1193 		off += sizeof(struct slabobj_ext);
1194 
1195 	if (off != size_from_object(s))
1196 		/* Beginning of the filler is the free pointer */
1197 		print_section(KERN_ERR, "Padding  ", p + off,
1198 			      size_from_object(s) - off);
1199 }
1200 
1201 static void object_err(struct kmem_cache *s, struct slab *slab,
1202 			u8 *object, const char *reason)
1203 {
1204 	if (slab_add_kunit_errors())
1205 		return;
1206 
1207 	slab_bug(s, reason);
1208 	if (!object || !check_valid_pointer(s, slab, object)) {
1209 		print_slab_info(slab);
1210 		pr_err("Invalid pointer 0x%p\n", object);
1211 	} else {
1212 		print_trailer(s, slab, object);
1213 	}
1214 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1215 
1216 	WARN_ON(1);
1217 }
1218 
1219 static void __slab_err(struct slab *slab)
1220 {
1221 	if (slab_in_kunit_test())
1222 		return;
1223 
1224 	print_slab_info(slab);
1225 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1226 
1227 	WARN_ON(1);
1228 }
1229 
1230 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
1231 			const char *fmt, ...)
1232 {
1233 	va_list args;
1234 
1235 	if (slab_add_kunit_errors())
1236 		return;
1237 
1238 	va_start(args, fmt);
1239 	__slab_bug(s, fmt, args);
1240 	va_end(args);
1241 
1242 	__slab_err(slab);
1243 }
1244 
1245 static void init_object(struct kmem_cache *s, void *object, u8 val)
1246 {
1247 	u8 *p = kasan_reset_tag(object);
1248 	unsigned int poison_size = s->object_size;
1249 
1250 	if (s->flags & SLAB_RED_ZONE) {
1251 		/*
1252 		 * Here and below, avoid overwriting the KMSAN shadow. Keeping
1253 		 * the shadow makes it possible to distinguish uninit-value
1254 		 * from use-after-free.
1255 		 */
1256 		memset_no_sanitize_memory(p - s->red_left_pad, val,
1257 					  s->red_left_pad);
1258 
1259 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1260 			/*
1261 			 * Redzone the extra space kmalloc allocated beyond the
1262 			 * requested size, and limit the poison size to the
1263 			 * original request size accordingly.
1264 			 */
1265 			poison_size = get_orig_size(s, object);
1266 		}
1267 	}
1268 
1269 	if (s->flags & __OBJECT_POISON) {
1270 		memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
1271 		memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
1272 	}
1273 
1274 	if (s->flags & SLAB_RED_ZONE)
1275 		memset_no_sanitize_memory(p + poison_size, val,
1276 					  s->inuse - poison_size);
1277 }
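/*
 * Illustrative sketch: the byte pattern init_object() leaves in a poisoned
 * object that is not in use, for a hypothetical 8-byte object_size with
 * redzones and orig_size handling ignored. 0x6b is POISON_FREE and 0xa5 is
 * POISON_END (see also the layout comment before check_pad_bytes()).
 */
static const unsigned char toy_poisoned_object[8] = {
	0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0xa5
};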
1278 
1279 static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
1280 						void *from, void *to)
1281 {
1282 	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1283 	memset(from, data, to - from);
1284 }
1285 
1286 #ifdef CONFIG_KMSAN
1287 #define pad_check_attributes noinline __no_kmsan_checks
1288 #else
1289 #define pad_check_attributes
1290 #endif
1291 
1292 static pad_check_attributes int
1293 check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
1294 		       u8 *object, const char *what, u8 *start, unsigned int value,
1295 		       unsigned int bytes, bool slab_obj_print)
1296 {
1297 	u8 *fault;
1298 	u8 *end;
1299 	u8 *addr = slab_address(slab);
1300 
1301 	metadata_access_enable();
1302 	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
1303 	metadata_access_disable();
1304 	if (!fault)
1305 		return 1;
1306 
1307 	end = start + bytes;
1308 	while (end > fault && end[-1] == value)
1309 		end--;
1310 
1311 	if (slab_add_kunit_errors())
1312 		goto skip_bug_print;
1313 
1314 	pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1315 	       what, fault, end - 1, fault - addr, fault[0], value);
1316 
1317 	if (slab_obj_print)
1318 		object_err(s, slab, object, "Object corrupt");
1319 
1320 skip_bug_print:
1321 	restore_bytes(s, what, value, fault, end);
1322 	return 0;
1323 }
1324 
1325 /*
1326  * Object field layout:
1327  *
1328  * [Left redzone padding] (if SLAB_RED_ZONE)
1329  *   - Field size: s->red_left_pad
1330  *   - Immediately precedes each object when SLAB_RED_ZONE is set.
1331  *   - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
1332  *     0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
1333  *
1334  * [Object bytes] (object address starts here)
1335  *   - Field size: s->object_size
1336  *   - Object payload bytes.
1337  *   - If the freepointer may overlap the object, it is stored inside
1338  *     the object (typically near the middle).
1339  *   - Poisoning uses 0x6b (POISON_FREE) and the last byte is
1340  *     0xa5 (POISON_END) when __OBJECT_POISON is enabled.
1341  *
1342  * [Word-align padding] (right redzone when SLAB_RED_ZONE is set)
1343  *   - Field size: s->inuse - s->object_size
1344  *   - If redzoning is enabled and ALIGN(size, sizeof(void *)) adds no
1345  *     padding, explicitly extend by one word so the right redzone is
1346  *     non-empty.
1347  *   - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
1348  *     0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
1349  *
1350  * [Metadata starts at object + s->inuse]
1351  *   - A. freelist pointer (if freeptr_outside_object)
1352  *   - B. alloc tracking (SLAB_STORE_USER)
1353  *   - C. free tracking (SLAB_STORE_USER)
1354  *   - D. original request size (SLAB_KMALLOC && SLAB_STORE_USER)
1355  *   - E. KASAN metadata (if enabled)
1356  *
1357  * [Mandatory padding] (if CONFIG_SLUB_DEBUG && SLAB_RED_ZONE)
1358  *   - One mandatory debug word to guarantee a minimum poisoned gap
1359  *     between metadata and the next object, independent of alignment.
1360  *   - Filled with 0x5a (POISON_INUSE) when SLAB_POISON is set.
1361  * [Final alignment padding]
1362  *   - Bytes added by ALIGN(size, s->align) to reach s->size.
1363  *   - When the padding is large enough, it can be used to store
1364  *     struct slabobj_ext for accounting metadata (obj_exts_in_object()).
1365  *   - The remaining bytes (if any) are filled with 0x5a (POISON_INUSE)
1366  *     when SLAB_POISON is set.
1367  *
1368  * Notes:
1369  * - Redzones are filled by init_object() with SLUB_RED_ACTIVE/INACTIVE.
1370  * - Object contents are poisoned with POISON_FREE/END when __OBJECT_POISON.
1371  * - The trailing padding is pre-filled with POISON_INUSE by
1372  *   setup_slab_debug() when SLAB_POISON is set, and is validated by
1373  *   check_pad_bytes().
1374  * - The first object pointer is slab_address(slab) +
1375  *   (s->red_left_pad if redzoning); subsequent objects are reached by
1376  *   adding s->size each time.
1377  *
1378  * If a slab cache flag relies on specific metadata to exist at a fixed
1379  * offset, the flag must be included in SLAB_NEVER_MERGE to prevent merging.
1380  * Otherwise, the cache would misbehave as s->object_size and s->inuse are
1381  * adjusted during cache merging (see __kmem_cache_alias()).
1382  */
1383 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
1384 {
1385 	unsigned long off = get_info_end(s);	/* The end of info */
1386 
1387 	if (s->flags & SLAB_STORE_USER) {
1388 		/* We also have user information there */
1389 		off += 2 * sizeof(struct track);
1390 
1391 		if (s->flags & SLAB_KMALLOC)
1392 			off += sizeof(unsigned long);
1393 	}
1394 
1395 	off += kasan_metadata_size(s, false);
1396 
1397 	if (obj_exts_in_object(s, slab))
1398 		off += sizeof(struct slabobj_ext);
1399 
1400 	if (size_from_object(s) == off)
1401 		return 1;
1402 
1403 	return check_bytes_and_report(s, slab, p, "Object padding",
1404 			p + off, POISON_INUSE, size_from_object(s) - off, true);
1405 }
1406 
1407 /* Check the pad bytes at the end of a slab page */
1408 static pad_check_attributes void
1409 slab_pad_check(struct kmem_cache *s, struct slab *slab)
1410 {
1411 	u8 *start;
1412 	u8 *fault;
1413 	u8 *end;
1414 	u8 *pad;
1415 	int length;
1416 	int remainder;
1417 
1418 	if (!(s->flags & SLAB_POISON))
1419 		return;
1420 
1421 	start = slab_address(slab);
1422 	length = slab_size(slab);
1423 	end = start + length;
1424 
1425 	if (obj_exts_in_slab(s, slab) && !obj_exts_in_object(s, slab)) {
1426 		remainder = length;
1427 		remainder -= obj_exts_offset_in_slab(s, slab);
1428 		remainder -= obj_exts_size_in_slab(slab);
1429 	} else {
1430 		remainder = length % s->size;
1431 	}
1432 
1433 	if (!remainder)
1434 		return;
1435 
1436 	pad = end - remainder;
1437 	metadata_access_enable();
1438 	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1439 	metadata_access_disable();
1440 	if (!fault)
1441 		return;
1442 	while (end > fault && end[-1] == POISON_INUSE)
1443 		end--;
1444 
1445 	slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1446 		 fault, end - 1, fault - start);
1447 	print_section(KERN_ERR, "Padding ", pad, remainder);
1448 	__slab_err(slab);
1449 
1450 	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
1451 }
1452 
1453 static int check_object(struct kmem_cache *s, struct slab *slab,
1454 					void *object, u8 val)
1455 {
1456 	u8 *p = object;
1457 	u8 *endobject = object + s->object_size;
1458 	unsigned int orig_size, kasan_meta_size;
1459 	int ret = 1;
1460 
1461 	if (s->flags & SLAB_RED_ZONE) {
1462 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1463 			object - s->red_left_pad, val, s->red_left_pad, ret))
1464 			ret = 0;
1465 
1466 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1467 			endobject, val, s->inuse - s->object_size, ret))
1468 			ret = 0;
1469 
1470 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1471 			orig_size = get_orig_size(s, object);
1472 
1473 			if (s->object_size > orig_size  &&
1474 				!check_bytes_and_report(s, slab, object,
1475 					"kmalloc Redzone", p + orig_size,
1476 					val, s->object_size - orig_size, ret)) {
1477 				ret = 0;
1478 			}
1479 		}
1480 	} else {
1481 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1482 			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
1483 				endobject, POISON_INUSE,
1484 				s->inuse - s->object_size, ret))
1485 				ret = 0;
1486 		}
1487 	}
1488 
1489 	if (s->flags & SLAB_POISON) {
1490 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1491 			/*
1492 			 * KASAN can save its free meta data inside of the
1493 			 * object at offset 0. Thus, skip checking the part of
1494 			 * the redzone that overlaps with the meta data.
1495 			 */
1496 			kasan_meta_size = kasan_metadata_size(s, true);
1497 			if (kasan_meta_size < s->object_size - 1 &&
1498 			    !check_bytes_and_report(s, slab, p, "Poison",
1499 					p + kasan_meta_size, POISON_FREE,
1500 					s->object_size - kasan_meta_size - 1, ret))
1501 				ret = 0;
1502 			if (kasan_meta_size < s->object_size &&
1503 			    !check_bytes_and_report(s, slab, p, "End Poison",
1504 					p + s->object_size - 1, POISON_END, 1, ret))
1505 				ret = 0;
1506 		}
1507 		/*
1508 		 * check_pad_bytes cleans up on its own.
1509 		 */
1510 		if (!check_pad_bytes(s, slab, p))
1511 			ret = 0;
1512 	}
1513 
1514 	/*
1515 	 * Cannot check freepointer while object is allocated if
1516 	 * object and freepointer overlap.
1517 	 */
1518 	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
1519 	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
1520 		object_err(s, slab, p, "Freepointer corrupt");
1521 		/*
1522 		 * No choice but to zap it and thus lose the remainder
1523 		 * of the free objects in this slab. May cause
1524 		 * another error because the object count is now wrong.
1525 		 */
1526 		set_freepointer(s, p, NULL);
1527 		ret = 0;
1528 	}
1529 
1530 	return ret;
1531 }
1532 
1533 /*
1534  * Checks if the slab state looks sane. Assumes the struct slab pointer
1535  * was either obtained in a way that ensures it's valid, or validated
1536  * by validate_slab_ptr()
1537  */
1538 static int check_slab(struct kmem_cache *s, struct slab *slab)
1539 {
1540 	int maxobj;
1541 
1542 	maxobj = order_objects(slab_order(slab), s->size);
1543 	if (slab->objects > maxobj) {
1544 		slab_err(s, slab, "objects %u > max %u",
1545 			slab->objects, maxobj);
1546 		return 0;
1547 	}
1548 	if (slab->inuse > slab->objects) {
1549 		slab_err(s, slab, "inuse %u > max %u",
1550 			slab->inuse, slab->objects);
1551 		return 0;
1552 	}
1553 	if (slab->frozen) {
1554 		slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
1555 		return 0;
1556 	}
1557 
1558 	/* Slab_pad_check fixes things up after itself */
1559 	slab_pad_check(s, slab);
1560 	return 1;
1561 }
1562 
1563 /*
1564  * Determine if a certain object in a slab is on the freelist. Must hold the
1565  * slab lock to guarantee that the chains are in a consistent state.
1566  */
1567 static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
1568 {
1569 	int nr = 0;
1570 	void *fp;
1571 	void *object = NULL;
1572 	int max_objects;
1573 
1574 	fp = slab->freelist;
1575 	while (fp && nr <= slab->objects) {
1576 		if (fp == search)
1577 			return true;
1578 		if (!check_valid_pointer(s, slab, fp)) {
1579 			if (object) {
1580 				object_err(s, slab, object,
1581 					"Freechain corrupt");
1582 				set_freepointer(s, object, NULL);
1583 				break;
1584 			} else {
1585 				slab_err(s, slab, "Freepointer corrupt");
1586 				slab->freelist = NULL;
1587 				slab->inuse = slab->objects;
1588 				slab_fix(s, "Freelist cleared");
1589 				return false;
1590 			}
1591 		}
1592 		object = fp;
1593 		fp = get_freepointer(s, object);
1594 		nr++;
1595 	}
1596 
1597 	if (nr > slab->objects) {
1598 		slab_err(s, slab, "Freelist cycle detected");
1599 		slab->freelist = NULL;
1600 		slab->inuse = slab->objects;
1601 		slab_fix(s, "Freelist cleared");
1602 		return false;
1603 	}
1604 
1605 	max_objects = order_objects(slab_order(slab), s->size);
1606 	if (max_objects > MAX_OBJS_PER_PAGE)
1607 		max_objects = MAX_OBJS_PER_PAGE;
1608 
1609 	if (slab->objects != max_objects) {
1610 		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1611 			 slab->objects, max_objects);
1612 		slab->objects = max_objects;
1613 		slab_fix(s, "Number of objects adjusted");
1614 	}
1615 	if (slab->inuse != slab->objects - nr) {
1616 		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
1617 			 slab->inuse, slab->objects - nr);
1618 		slab->inuse = slab->objects - nr;
1619 		slab_fix(s, "Object count adjusted");
1620 	}
1621 	return search == NULL;
1622 }
1623 
1624 static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1625 								int alloc)
1626 {
1627 	if (s->flags & SLAB_TRACE) {
1628 		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1629 			s->name,
1630 			alloc ? "alloc" : "free",
1631 			object, slab->inuse,
1632 			slab->freelist);
1633 
1634 		if (!alloc)
1635 			print_section(KERN_INFO, "Object ", (void *)object,
1636 					s->object_size);
1637 
1638 		dump_stack();
1639 	}
1640 }
1641 
1642 /*
1643  * Tracking of fully allocated slabs for debugging purposes.
1644  */
1645 static void add_full(struct kmem_cache *s,
1646 	struct kmem_cache_node *n, struct slab *slab)
1647 {
1648 	if (!(s->flags & SLAB_STORE_USER))
1649 		return;
1650 
1651 	lockdep_assert_held(&n->list_lock);
1652 	list_add(&slab->slab_list, &n->full);
1653 }
1654 
1655 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
1656 {
1657 	if (!(s->flags & SLAB_STORE_USER))
1658 		return;
1659 
1660 	lockdep_assert_held(&n->list_lock);
1661 	list_del(&slab->slab_list);
1662 }
1663 
1664 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1665 {
1666 	return atomic_long_read(&n->nr_slabs);
1667 }
1668 
1669 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1670 {
1671 	struct kmem_cache_node *n = get_node(s, node);
1672 
1673 	atomic_long_inc(&n->nr_slabs);
1674 	atomic_long_add(objects, &n->total_objects);
1675 }
1676 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1677 {
1678 	struct kmem_cache_node *n = get_node(s, node);
1679 
1680 	atomic_long_dec(&n->nr_slabs);
1681 	atomic_long_sub(objects, &n->total_objects);
1682 }
1683 
1684 /* Object debug checks for alloc/free paths */
1685 static void setup_object_debug(struct kmem_cache *s, void *object)
1686 {
1687 	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1688 		return;
1689 
1690 	init_object(s, object, SLUB_RED_INACTIVE);
1691 	init_tracking(s, object);
1692 }
1693 
1694 static
1695 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
1696 {
1697 	if (!kmem_cache_debug_flags(s, SLAB_POISON))
1698 		return;
1699 
1700 	metadata_access_enable();
1701 	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
1702 	metadata_access_disable();
1703 }
1704 
1705 static inline int alloc_consistency_checks(struct kmem_cache *s,
1706 					struct slab *slab, void *object)
1707 {
1708 	if (!check_slab(s, slab))
1709 		return 0;
1710 
1711 	if (!check_valid_pointer(s, slab, object)) {
1712 		object_err(s, slab, object, "Freelist Pointer check fails");
1713 		return 0;
1714 	}
1715 
1716 	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1717 		return 0;
1718 
1719 	return 1;
1720 }
1721 
1722 static noinline bool alloc_debug_processing(struct kmem_cache *s,
1723 			struct slab *slab, void *object, int orig_size)
1724 {
1725 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1726 		if (!alloc_consistency_checks(s, slab, object))
1727 			goto bad;
1728 	}
1729 
1730 	/* Success. Perform special debug activities for allocs */
1731 	trace(s, slab, object, 1);
1732 	set_orig_size(s, object, orig_size);
1733 	init_object(s, object, SLUB_RED_ACTIVE);
1734 	return true;
1735 
1736 bad:
1737 	/*
1738 	 * Let's do the best we can to avoid issues in the future. Marking all
1739 	 * objects as used avoids touching the remaining objects.
1740 	 */
1741 	slab_fix(s, "Marking all objects used");
1742 	slab->inuse = slab->objects;
1743 	slab->freelist = NULL;
1744 	slab->frozen = 1; /* mark consistency-failed slab as frozen */
1745 
1746 	return false;
1747 }
1748 
1749 static inline int free_consistency_checks(struct kmem_cache *s,
1750 		struct slab *slab, void *object, unsigned long addr)
1751 {
1752 	if (!check_valid_pointer(s, slab, object)) {
1753 		slab_err(s, slab, "Invalid object pointer 0x%p", object);
1754 		return 0;
1755 	}
1756 
1757 	if (on_freelist(s, slab, object)) {
1758 		object_err(s, slab, object, "Object already free");
1759 		return 0;
1760 	}
1761 
1762 	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1763 		return 0;
1764 
1765 	if (unlikely(s != slab->slab_cache)) {
1766 		if (!slab->slab_cache) {
1767 			slab_err(NULL, slab, "No slab cache for object 0x%p",
1768 				 object);
1769 		} else {
1770 			object_err(s, slab, object,
1771 				   "page slab pointer corrupt.");
1772 		}
1773 		return 0;
1774 	}
1775 	return 1;
1776 }
1777 
1778 /*
1779  * Parse a block of slab_debug options. Blocks are delimited by ';'
1780  *
1781  * @str:    start of block
1782  * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1783  * @slabs:  return start of list of slabs, or NULL when there's no list
1784  * @init:   assume this is initial parsing and not per-kmem-create parsing
1785  *
1786  * returns the start of next block if there's any, or NULL
1787  */
1788 static const char *
1789 parse_slub_debug_flags(const char *str, slab_flags_t *flags, const char **slabs, bool init)
1790 {
1791 	bool higher_order_disable = false;
1792 
1793 	/* Skip any completely empty blocks */
1794 	while (*str && *str == ';')
1795 		str++;
1796 
1797 	if (*str == ',') {
1798 		/*
1799 		 * No options but restriction on slabs. This means full
1800 		 * debugging for slabs matching a pattern.
1801 		 */
1802 		*flags = DEBUG_DEFAULT_FLAGS;
1803 		goto check_slabs;
1804 	}
1805 	*flags = 0;
1806 
1807 	/* Determine which debug features should be switched on */
1808 	for (; *str && *str != ',' && *str != ';'; str++) {
1809 		switch (tolower(*str)) {
1810 		case '-':
1811 			*flags = 0;
1812 			break;
1813 		case 'f':
1814 			*flags |= SLAB_CONSISTENCY_CHECKS;
1815 			break;
1816 		case 'z':
1817 			*flags |= SLAB_RED_ZONE;
1818 			break;
1819 		case 'p':
1820 			*flags |= SLAB_POISON;
1821 			break;
1822 		case 'u':
1823 			*flags |= SLAB_STORE_USER;
1824 			break;
1825 		case 't':
1826 			*flags |= SLAB_TRACE;
1827 			break;
1828 		case 'a':
1829 			*flags |= SLAB_FAILSLAB;
1830 			break;
1831 		case 'o':
1832 			/*
1833 			 * Avoid enabling debugging on caches if their minimum
1834 			 * order would increase as a result.
1835 			 */
1836 			higher_order_disable = true;
1837 			break;
1838 		default:
1839 			if (init)
1840 				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
1841 		}
1842 	}
1843 check_slabs:
1844 	if (*str == ',')
1845 		*slabs = ++str;
1846 	else
1847 		*slabs = NULL;
1848 
1849 	/* Skip over the slab list */
1850 	while (*str && *str != ';')
1851 		str++;
1852 
1853 	/* Skip any completely empty blocks */
1854 	while (*str && *str == ';')
1855 		str++;
1856 
1857 	if (init && higher_order_disable)
1858 		disable_higher_order_debug = 1;
1859 
1860 	if (*str)
1861 		return str;
1862 	else
1863 		return NULL;
1864 }
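
/*
 * Illustrative example of the syntax parsed above (not from the original
 * source): slab_debug=FZ,dentry;P,kmalloc-* consists of two blocks. The
 * first enables consistency checks and red zoning only for the "dentry"
 * cache, the second enables poisoning for caches matching the "kmalloc-*"
 * glob (see kmem_cache_flags() below). A block without a slab list, e.g.
 * slab_debug=U, changes the global flags for all caches instead.
 */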
1865 
1866 static int __init setup_slub_debug(const char *str, const struct kernel_param *kp)
1867 {
1868 	slab_flags_t flags;
1869 	slab_flags_t global_flags;
1870 	const char *saved_str;
1871 	const char *slab_list;
1872 	bool global_slub_debug_changed = false;
1873 	bool slab_list_specified = false;
1874 
1875 	global_flags = DEBUG_DEFAULT_FLAGS;
1876 	if (!str || !*str)
1877 		/*
1878 		 * No options specified. Switch on full debugging.
1879 		 */
1880 		goto out;
1881 
1882 	saved_str = str;
1883 	while (str) {
1884 		str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1885 
1886 		if (!slab_list) {
1887 			global_flags = flags;
1888 			global_slub_debug_changed = true;
1889 		} else {
1890 			slab_list_specified = true;
1891 			if (flags & SLAB_STORE_USER)
1892 				stack_depot_request_early_init();
1893 		}
1894 	}
1895 
1896 	/*
1897 	 * For backwards compatibility, a single list of flags with list of
1898 	 * slabs means debugging is only changed for those slabs, so the global
1899 	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1900 	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
1901 	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
1902 	 */
1903 	if (slab_list_specified) {
1904 		if (!global_slub_debug_changed)
1905 			global_flags = slub_debug;
1906 		slub_debug_string = saved_str;
1907 	}
1908 out:
1909 	slub_debug = global_flags;
1910 	if (slub_debug & SLAB_STORE_USER)
1911 		stack_depot_request_early_init();
1912 	if (slub_debug != 0 || slub_debug_string)
1913 		static_branch_enable(&slub_debug_enabled);
1914 	else
1915 		static_branch_disable(&slub_debug_enabled);
1916 	if ((static_branch_unlikely(&init_on_alloc) ||
1917 	     static_branch_unlikely(&init_on_free)) &&
1918 	    (slub_debug & SLAB_POISON))
1919 		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1920 	return 0;
1921 }
1922 
1923 static const struct kernel_param_ops param_ops_slab_debug __initconst = {
1924 	.flags = KERNEL_PARAM_OPS_FL_NOARG,
1925 	.set = setup_slub_debug,
1926 };
1927 __core_param_cb(slab_debug, &param_ops_slab_debug, NULL, 0);
1928 __core_param_cb(slub_debug, &param_ops_slab_debug, NULL, 0);
1929 
1930 /*
1931  * kmem_cache_flags - apply debugging options to the cache
1932  * @flags:		flags to set
1933  * @name:		name of the cache
1934  *
1935  * Debug option(s) are applied to @flags. In addition to the debug
1936  * option(s), if a slab name (or multiple) is specified i.e.
1937  * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1938  * then only the select slabs will receive the debug option(s).
1939  */
1940 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1941 {
1942 	const char *iter;
1943 	size_t len;
1944 	const char *next_block;
1945 	slab_flags_t block_flags;
1946 	slab_flags_t slub_debug_local = slub_debug;
1947 
1948 	if (flags & SLAB_NO_USER_FLAGS)
1949 		return flags;
1950 
1951 	/*
1952 	 * If the slab cache is for debugging (e.g. kmemleak) then
1953 	 * don't store user (stack trace) information by default,
1954 	 * but let the user enable it via the command line below.
1955 	 */
1956 	if (flags & SLAB_NOLEAKTRACE)
1957 		slub_debug_local &= ~SLAB_STORE_USER;
1958 
1959 	len = strlen(name);
1960 	next_block = slub_debug_string;
1961 	/* Go through all blocks of debug options, see if any matches our slab's name */
1962 	while (next_block) {
1963 		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1964 		if (!iter)
1965 			continue;
1966 		/* Found a block that has a slab list, search it */
1967 		while (*iter) {
1968 			const char *end, *glob;
1969 			size_t cmplen;
1970 
1971 			end = strchrnul(iter, ',');
1972 			if (next_block && next_block < end)
1973 				end = next_block - 1;
1974 
1975 			glob = strnchr(iter, end - iter, '*');
1976 			if (glob)
1977 				cmplen = glob - iter;
1978 			else
1979 				cmplen = max_t(size_t, len, (end - iter));
1980 
1981 			if (!strncmp(name, iter, cmplen)) {
1982 				flags |= block_flags;
1983 				return flags;
1984 			}
1985 
1986 			if (!*end || *end == ';')
1987 				break;
1988 			iter = end + 1;
1989 		}
1990 	}
1991 
1992 	return flags | slub_debug_local;
1993 }
1994 #else /* !CONFIG_SLUB_DEBUG */
1995 static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
1996 static inline
1997 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
1998 
1999 static inline bool alloc_debug_processing(struct kmem_cache *s,
2000 	struct slab *slab, void *object, int orig_size) { return true; }
2001 
2002 static inline bool free_debug_processing(struct kmem_cache *s,
2003 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
2004 	unsigned long addr, depot_stack_handle_t handle) { return true; }
2005 
2006 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
2007 static inline int check_object(struct kmem_cache *s, struct slab *slab,
2008 			void *object, u8 val) { return 1; }
2009 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
2010 static inline void set_track(struct kmem_cache *s, void *object,
2011 			     enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
2012 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
2013 					struct slab *slab) {}
2014 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
2015 					struct slab *slab) {}
2016 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
2017 {
2018 	return flags;
2019 }
2020 #define slub_debug 0
2021 
2022 #define disable_higher_order_debug 0
2023 
2024 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
2025 							{ return 0; }
2026 static inline void inc_slabs_node(struct kmem_cache *s, int node,
2027 							int objects) {}
2028 static inline void dec_slabs_node(struct kmem_cache *s, int node,
2029 							int objects) {}
2030 #endif /* CONFIG_SLUB_DEBUG */
2031 
2032 /*
2033  * The allocated objcg pointers array is not accounted directly.
2034  * Moreover, it should not come from DMA buffer and is not readily
2035  * reclaimable. So those GFP bits should be masked off.
2036  */
2037 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
2038 				__GFP_ACCOUNT | __GFP_NOFAIL)
2039 
2040 #ifdef CONFIG_SLAB_OBJ_EXT
2041 
2042 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
2043 
2044 static inline void mark_obj_codetag_empty(const void *obj)
2045 {
2046 	struct slab *obj_slab;
2047 	unsigned long slab_exts;
2048 
2049 	obj_slab = virt_to_slab(obj);
2050 	slab_exts = slab_obj_exts(obj_slab);
2051 	if (slab_exts) {
2052 		get_slab_obj_exts(slab_exts);
2053 		unsigned int offs = obj_to_index(obj_slab->slab_cache,
2054 						 obj_slab, obj);
2055 		struct slabobj_ext *ext = slab_obj_ext(obj_slab,
2056 						       slab_exts, offs);
2057 
2058 		if (unlikely(is_codetag_empty(&ext->ref))) {
2059 			put_slab_obj_exts(slab_exts);
2060 			return;
2061 		}
2062 
2063 		/* codetag should be NULL here */
2064 		WARN_ON(ext->ref.ct);
2065 		set_codetag_empty(&ext->ref);
2066 		put_slab_obj_exts(slab_exts);
2067 	}
2068 }
2069 
2070 static inline bool mark_failed_objexts_alloc(struct slab *slab)
2071 {
2072 	return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
2073 }
2074 
2075 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2076 			struct slabobj_ext *vec, unsigned int objects)
2077 {
2078 	/*
2079 	 * If vector previously failed to allocate then we have live
2080 	 * objects with no tag reference. Mark all references in this
2081 	 * vector as empty to avoid warnings later on.
2082 	 */
2083 	if (obj_exts == OBJEXTS_ALLOC_FAIL) {
2084 		unsigned int i;
2085 
2086 		for (i = 0; i < objects; i++)
2087 			set_codetag_empty(&vec[i].ref);
2088 	}
2089 }
2090 
2091 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2092 
2093 static inline void mark_obj_codetag_empty(const void *obj) {}
2094 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
2095 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2096 			struct slabobj_ext *vec, unsigned int objects) {}
2097 
2098 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2099 
2100 static inline void init_slab_obj_exts(struct slab *slab)
2101 {
2102 	slab->obj_exts = 0;
2103 }
2104 
2105 /*
2106  * Calculate the allocation size for slabobj_ext array.
2107  *
2108  * When memory allocation profiling is enabled, the obj_exts array
2109  * could be allocated from the same slab cache it's being allocated for.
2110  * This would prevent the slab from ever being freed because it would
2111  * always contain at least one allocated object (its own obj_exts array).
2112  *
2113  * To avoid this, increase the allocation size when we detect the array
2114  * may come from the same cache, forcing it to use a different cache.
2115  */
2116 static inline size_t obj_exts_alloc_size(struct kmem_cache *s,
2117 					 struct slab *slab, gfp_t gfp)
2118 {
2119 	size_t sz = sizeof(struct slabobj_ext) * slab->objects;
2120 	struct kmem_cache *obj_exts_cache;
2121 
2122 	/*
2123 	 * slabobj_ext arrays for KMALLOC_CGROUP allocations
2124 	 * are served from KMALLOC_NORMAL caches.
2125 	 */
2126 	if (!mem_alloc_profiling_enabled())
2127 		return sz;
2128 
2129 	if (sz > KMALLOC_MAX_CACHE_SIZE)
2130 		return sz;
2131 
2132 	if (!is_kmalloc_normal(s))
2133 		return sz;
2134 
2135 	obj_exts_cache = kmalloc_slab(sz, NULL, gfp, 0);
2136 	/*
2137 	 * We can't simply compare s with obj_exts_cache, because random kmalloc
2138 	 * caches have multiple caches per size, selected by caller address.
2139 	 * Since caller address may differ between kmalloc_slab() and actual
2140 	 * allocation, bump size when sizes are equal.
2141 	 */
2142 	if (s->object_size == obj_exts_cache->object_size)
2143 		return obj_exts_cache->object_size + 1;
2144 
2145 	return sz;
2146 }
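
/*
 * Illustrative example: if kmalloc_slab() resolves the slabobj_ext array
 * to the very same size bucket as the cache being extended (the
 * object_size comparison above), the array could be allocated from that
 * cache's own slabs and pin them forever; returning object_size + 1
 * pushes the allocation into the next kmalloc bucket instead.
 */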
2147 
2148 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2149 		        gfp_t gfp, bool new_slab)
2150 {
2151 	bool allow_spin = gfpflags_allow_spinning(gfp);
2152 	unsigned int objects = objs_per_slab(s, slab);
2153 	unsigned long new_exts;
2154 	unsigned long old_exts;
2155 	struct slabobj_ext *vec;
2156 	size_t sz;
2157 
2158 	gfp &= ~OBJCGS_CLEAR_MASK;
2159 	/* Prevent recursive extension vector allocation */
2160 	gfp |= __GFP_NO_OBJ_EXT;
2161 
2162 	sz = obj_exts_alloc_size(s, slab, gfp);
2163 
2164 	/*
2165 	 * Note that allow_spin may be false during early boot and its
2166 	 * Note that allow_spin may be false during early boot with its
2167 	 * architectures with cmpxchg16b, early obj_exts will be missing for
2168 	 * very early allocations on those.
2169 	 */
2170 	if (unlikely(!allow_spin))
2171 		vec = kmalloc_nolock(sz, __GFP_ZERO | __GFP_NO_OBJ_EXT,
2172 				     slab_nid(slab));
2173 	else
2174 		vec = kmalloc_node(sz, gfp | __GFP_ZERO, slab_nid(slab));
2175 
2176 	if (!vec) {
2177 		/*
2178 		 * Try to mark vectors which failed to allocate.
2179 		 * If this operation fails, there may be a racing process
2180 		 * that has already completed the allocation.
2181 		 */
2182 		if (!mark_failed_objexts_alloc(slab) &&
2183 		    slab_obj_exts(slab))
2184 			return 0;
2185 
2186 		return -ENOMEM;
2187 	}
2188 
2189 	VM_WARN_ON_ONCE(virt_to_slab(vec) != NULL &&
2190 			virt_to_slab(vec)->slab_cache == s);
2191 
2192 	new_exts = (unsigned long)vec;
2193 #ifdef CONFIG_MEMCG
2194 	new_exts |= MEMCG_DATA_OBJEXTS;
2195 #endif
2196 retry:
2197 	old_exts = READ_ONCE(slab->obj_exts);
2198 	handle_failed_objexts_alloc(old_exts, vec, objects);
2199 
2200 	if (new_slab) {
2201 		/*
2202 		 * If the slab is brand new and nobody can yet access its
2203 		 * obj_exts, no synchronization is required and obj_exts can
2204 		 * be simply assigned.
2205 		 */
2206 		slab->obj_exts = new_exts;
2207 	} else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
2208 		/*
2209 		 * If the slab is already in use, somebody can allocate and
2210 		 * assign slabobj_exts in parallel. In this case the existing
2211 		 * objcg vector should be reused.
2212 		 */
2213 		mark_obj_codetag_empty(vec);
2214 		if (unlikely(!allow_spin))
2215 			kfree_nolock(vec);
2216 		else
2217 			kfree(vec);
2218 		return 0;
2219 	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
2220 		/* Retry if a racing thread changed slab->obj_exts from under us. */
2221 		goto retry;
2222 	}
2223 
2224 	if (allow_spin)
2225 		kmemleak_not_leak(vec);
2226 	return 0;
2227 }
2228 
2229 static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
2230 {
2231 	struct slabobj_ext *obj_exts;
2232 
2233 	obj_exts = (struct slabobj_ext *)slab_obj_exts(slab);
2234 	if (!obj_exts) {
2235 		/*
2236 		 * If obj_exts allocation failed, slab->obj_exts is set to
2237 		 * OBJEXTS_ALLOC_FAIL. In this case, we end up here and should
2238 		 * clear the flag.
2239 		 */
2240 		slab->obj_exts = 0;
2241 		return;
2242 	}
2243 
2244 	if (obj_exts_in_slab(slab->slab_cache, slab)) {
2245 		slab->obj_exts = 0;
2246 		return;
2247 	}
2248 
2249 	/*
2250 	 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its
2251 	 * corresponding extension will be NULL. alloc_tag_sub() will throw a
2252 	 * warning if slab has extensions but the extension of an object is
2253 	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
2254 	 * the extension for obj_exts is expected to be NULL.
2255 	 */
2256 	mark_obj_codetag_empty(obj_exts);
2257 	if (allow_spin)
2258 		kfree(obj_exts);
2259 	else
2260 		kfree_nolock(obj_exts);
2261 	slab->obj_exts = 0;
2262 }
2263 
2264 /*
2265  * Try to allocate slabobj_ext array from unused space.
2266  * This function must be called on a freshly allocated slab to prevent
2267  * concurrency problems.
2268  */
2269 static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
2270 {
2271 	void *addr;
2272 	unsigned long obj_exts;
2273 
2274 	/* Initialize stride early to avoid memory ordering issues */
2275 	slab_set_stride(slab, sizeof(struct slabobj_ext));
2276 
2277 	if (!need_slab_obj_exts(s))
2278 		return;
2279 
2280 	if (obj_exts_fit_within_slab_leftover(s, slab)) {
2281 		addr = slab_address(slab) + obj_exts_offset_in_slab(s, slab);
2282 		addr = kasan_reset_tag(addr);
2283 		obj_exts = (unsigned long)addr;
2284 
2285 		get_slab_obj_exts(obj_exts);
2286 		memset(addr, 0, obj_exts_size_in_slab(slab));
2287 		put_slab_obj_exts(obj_exts);
2288 
2289 #ifdef CONFIG_MEMCG
2290 		obj_exts |= MEMCG_DATA_OBJEXTS;
2291 #endif
2292 		slab->obj_exts = obj_exts;
2293 	} else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
2294 		unsigned int offset = obj_exts_offset_in_object(s);
2295 
2296 		obj_exts = (unsigned long)slab_address(slab);
2297 		obj_exts += s->red_left_pad;
2298 		obj_exts += offset;
2299 
2300 		get_slab_obj_exts(obj_exts);
2301 		for_each_object(addr, s, slab_address(slab), slab->objects)
2302 			memset(kasan_reset_tag(addr) + offset, 0,
2303 			       sizeof(struct slabobj_ext));
2304 		put_slab_obj_exts(obj_exts);
2305 
2306 #ifdef CONFIG_MEMCG
2307 		obj_exts |= MEMCG_DATA_OBJEXTS;
2308 #endif
2309 		slab->obj_exts = obj_exts;
2310 		slab_set_stride(slab, s->size);
2311 	}
2312 }
2313 
2314 #else /* CONFIG_SLAB_OBJ_EXT */
2315 
2316 static inline void mark_obj_codetag_empty(const void *obj)
2317 {
2318 }
2319 
2320 static inline void init_slab_obj_exts(struct slab *slab)
2321 {
2322 }
2323 
2324 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2325 			       gfp_t gfp, bool new_slab)
2326 {
2327 	return 0;
2328 }
2329 
2330 static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
2331 {
2332 }
2333 
2334 static inline void alloc_slab_obj_exts_early(struct kmem_cache *s,
2335 						       struct slab *slab)
2336 {
2337 }
2338 
2339 #endif /* CONFIG_SLAB_OBJ_EXT */
2340 
2341 #ifdef CONFIG_MEM_ALLOC_PROFILING
2342 
2343 static inline unsigned long
2344 prepare_slab_obj_exts_hook(struct kmem_cache *s, struct slab *slab,
2345 			   gfp_t flags, void *p)
2346 {
2347 	if (!slab_obj_exts(slab) &&
2348 	    alloc_slab_obj_exts(slab, s, flags, false)) {
2349 		pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
2350 			     __func__, s->name);
2351 		return 0;
2352 	}
2353 
2354 	return slab_obj_exts(slab);
2355 }
2356 
2357 
2358 /* Should be called only if mem_alloc_profiling_enabled() */
2359 static noinline void
2360 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2361 {
2362 	unsigned long obj_exts;
2363 	struct slabobj_ext *obj_ext;
2364 	struct slab *slab;
2365 
2366 	if (!object)
2367 		return;
2368 
2369 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2370 		return;
2371 
2372 	if (flags & __GFP_NO_OBJ_EXT)
2373 		return;
2374 
2375 	slab = virt_to_slab(object);
2376 	obj_exts = prepare_slab_obj_exts_hook(s, slab, flags, object);
2377 	/*
2378 	 * Currently obj_exts is used only for allocation profiling.
2379 	 * If other users appear then mem_alloc_profiling_enabled()
2380 	 * check should be added before alloc_tag_add().
2381 	 */
2382 	if (obj_exts) {
2383 		unsigned int obj_idx = obj_to_index(s, slab, object);
2384 
2385 		get_slab_obj_exts(obj_exts);
2386 		obj_ext = slab_obj_ext(slab, obj_exts, obj_idx);
2387 		alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
2388 		put_slab_obj_exts(obj_exts);
2389 	} else {
2390 		alloc_tag_set_inaccurate(current->alloc_tag);
2391 	}
2392 }
2393 
2394 static inline void
2395 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2396 {
2397 	if (mem_alloc_profiling_enabled())
2398 		__alloc_tagging_slab_alloc_hook(s, object, flags);
2399 }
2400 
2401 /* Should be called only if mem_alloc_profiling_enabled() */
2402 static noinline void
2403 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2404 			       int objects)
2405 {
2406 	int i;
2407 	unsigned long obj_exts;
2408 
2409 	/* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
2410 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2411 		return;
2412 
2413 	obj_exts = slab_obj_exts(slab);
2414 	if (!obj_exts)
2415 		return;
2416 
2417 	get_slab_obj_exts(obj_exts);
2418 	for (i = 0; i < objects; i++) {
2419 		unsigned int off = obj_to_index(s, slab, p[i]);
2420 
2421 		alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
2422 	}
2423 	put_slab_obj_exts(obj_exts);
2424 }
2425 
2426 static inline void
2427 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2428 			     int objects)
2429 {
2430 	if (mem_alloc_profiling_enabled())
2431 		__alloc_tagging_slab_free_hook(s, slab, p, objects);
2432 }
2433 
2434 #else /* CONFIG_MEM_ALLOC_PROFILING */
2435 
2436 static inline void
2437 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2438 {
2439 }
2440 
2441 static inline void
2442 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2443 			     int objects)
2444 {
2445 }
2446 
2447 #endif /* CONFIG_MEM_ALLOC_PROFILING */
2448 
2449 
2450 #ifdef CONFIG_MEMCG
2451 
2452 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
2453 
2454 static __fastpath_inline
2455 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2456 				gfp_t flags, size_t size, void **p)
2457 {
2458 	if (likely(!memcg_kmem_online()))
2459 		return true;
2460 
2461 	if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
2462 		return true;
2463 
2464 	if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
2465 		return true;
2466 
2467 	if (likely(size == 1)) {
2468 		memcg_alloc_abort_single(s, *p);
2469 		*p = NULL;
2470 	} else {
2471 		kmem_cache_free_bulk(s, size, p);
2472 	}
2473 
2474 	return false;
2475 }
2476 
2477 static __fastpath_inline
2478 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2479 			  int objects)
2480 {
2481 	unsigned long obj_exts;
2482 
2483 	if (!memcg_kmem_online())
2484 		return;
2485 
2486 	obj_exts = slab_obj_exts(slab);
2487 	if (likely(!obj_exts))
2488 		return;
2489 
2490 	get_slab_obj_exts(obj_exts);
2491 	__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
2492 	put_slab_obj_exts(obj_exts);
2493 }
2494 
2495 static __fastpath_inline
2496 bool memcg_slab_post_charge(void *p, gfp_t flags)
2497 {
2498 	unsigned long obj_exts;
2499 	struct slabobj_ext *obj_ext;
2500 	struct kmem_cache *s;
2501 	struct page *page;
2502 	struct slab *slab;
2503 	unsigned long off;
2504 
2505 	page = virt_to_page(p);
2506 	if (PageLargeKmalloc(page)) {
2507 		unsigned int order;
2508 		int size;
2509 
2510 		if (PageMemcgKmem(page))
2511 			return true;
2512 
2513 		order = large_kmalloc_order(page);
2514 		if (__memcg_kmem_charge_page(page, flags, order))
2515 			return false;
2516 
2517 		/*
2518 		 * This page has already been accounted in the global stats but
2519 		 * not in the memcg stats. So, subtract from the global and use
2520 		 * the interface which adds to both global and memcg stats.
2521 		 */
2522 		size = PAGE_SIZE << order;
2523 		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, -size);
2524 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, size);
2525 		return true;
2526 	}
2527 
2528 	slab = page_slab(page);
2529 	s = slab->slab_cache;
2530 
2531 	/*
2532 	 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency
2533 	 * of slab_obj_exts being allocated from the same slab and thus the slab
2534 	 * becoming effectively unfreeable.
2535 	 */
2536 	if (is_kmalloc_normal(s))
2537 		return true;
2538 
2539 	/* Ignore already charged objects. */
2540 	obj_exts = slab_obj_exts(slab);
2541 	if (obj_exts) {
2542 		get_slab_obj_exts(obj_exts);
2543 		off = obj_to_index(s, slab, p);
2544 		obj_ext = slab_obj_ext(slab, obj_exts, off);
2545 		if (unlikely(obj_ext->objcg)) {
2546 			put_slab_obj_exts(obj_exts);
2547 			return true;
2548 		}
2549 		put_slab_obj_exts(obj_exts);
2550 	}
2551 
2552 	return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
2553 }
2554 
2555 #else /* CONFIG_MEMCG */
2556 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
2557 					      struct list_lru *lru,
2558 					      gfp_t flags, size_t size,
2559 					      void **p)
2560 {
2561 	return true;
2562 }
2563 
2564 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2565 					void **p, int objects)
2566 {
2567 }
2568 
2569 static inline bool memcg_slab_post_charge(void *p, gfp_t flags)
2570 {
2571 	return true;
2572 }
2573 #endif /* CONFIG_MEMCG */
2574 
2575 #ifdef CONFIG_SLUB_RCU_DEBUG
2576 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
2577 
2578 struct rcu_delayed_free {
2579 	struct rcu_head head;
2580 	void *object;
2581 };
2582 #endif
2583 
2584 /*
2585  * Hooks for other subsystems that check memory allocations. In a typical
2586  * production configuration these hooks all should produce no code at all.
2587  *
2588  * Returns true if freeing of the object can proceed, false if its reuse
2589  * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
2590  * to KFENCE.
2591  *
2592  * For objects allocated via kmalloc_nolock(), only a subset of alloc hooks
2593  * are invoked, so some free hooks must handle asymmetric hook calls.
2594  *
2595  * Alloc hooks called for kmalloc_nolock():
2596  * - kmsan_slab_alloc()
2597  * - kasan_slab_alloc()
2598  * - memcg_slab_post_alloc_hook()
2599  * - alloc_tagging_slab_alloc_hook()
2600  *
2601  * Free hooks that must handle missing corresponding alloc hooks:
2602  * - kmemleak_free_recursive()
2603  * - kfence_free()
2604  *
2605  * Free hooks that have no alloc hook counterpart, and thus safe to call:
2606  * - debug_check_no_locks_freed()
2607  * - debug_check_no_obj_freed()
2608  * - __kcsan_check_access()
2609  */
2610 static __always_inline
2611 bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
2612 		    bool after_rcu_delay)
2613 {
2614 	/* Are the object contents still accessible? */
2615 	bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
2616 
2617 	kmemleak_free_recursive(x, s->flags);
2618 	kmsan_slab_free(s, x);
2619 
2620 	debug_check_no_locks_freed(x, s->object_size);
2621 
2622 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
2623 		debug_check_no_obj_freed(x, s->object_size);
2624 
2625 	/* Use KCSAN to help debug racy use-after-free. */
2626 	if (!still_accessible)
2627 		__kcsan_check_access(x, s->object_size,
2628 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
2629 
2630 	if (kfence_free(x))
2631 		return false;
2632 
2633 	/*
2634 	 * Give KASAN a chance to notice an invalid free operation before we
2635 	 * modify the object.
2636 	 */
2637 	if (kasan_slab_pre_free(s, x))
2638 		return false;
2639 
2640 #ifdef CONFIG_SLUB_RCU_DEBUG
2641 	if (still_accessible) {
2642 		struct rcu_delayed_free *delayed_free;
2643 
2644 		delayed_free = kmalloc_obj(*delayed_free, GFP_NOWAIT);
2645 		if (delayed_free) {
2646 			/*
2647 			 * Let KASAN track our call stack as a "related work
2648 			 * creation", just like if the object had been freed
2649 			 * normally via kfree_rcu().
2650 			 * We have to do this manually because the rcu_head is
2651 			 * not located inside the object.
2652 			 */
2653 			kasan_record_aux_stack(x);
2654 
2655 			delayed_free->object = x;
2656 			call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
2657 			return false;
2658 		}
2659 	}
2660 #endif /* CONFIG_SLUB_RCU_DEBUG */
2661 
2662 	/*
2663 	 * As memory initialization might be integrated into KASAN,
2664 	 * kasan_slab_free and initialization memset's must be
2665 	 * kept together to avoid discrepancies in behavior.
2666 	 *
2667 	 * The initialization memset's clear the object and the metadata,
2668 	 * but don't touch the SLAB redzone.
2669 	 *
2670 	 * The object's freepointer is also avoided if stored outside the
2671 	 * object.
2672 	 */
2673 	if (unlikely(init)) {
2674 		int rsize;
2675 		unsigned int inuse, orig_size;
2676 
2677 		inuse = get_info_end(s);
2678 		orig_size = get_orig_size(s, x);
2679 		if (!kasan_has_integrated_init())
2680 			memset(kasan_reset_tag(x), 0, orig_size);
2681 		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2682 		memset((char *)kasan_reset_tag(x) + inuse, 0,
2683 		       s->size - inuse - rsize);
2684 		/*
2685 		 * Restore orig_size, otherwise kmalloc redzone overwritten
2686 		 * would be reported
2687 		 */
2688 		set_orig_size(s, x, orig_size);
2689 
2690 	}
2691 	/* KASAN might put x into memory quarantine, delaying its reuse. */
2692 	return !kasan_slab_free(s, x, init, still_accessible, false);
2693 }
2694 
2695 static __fastpath_inline
2696 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
2697 			     int *cnt)
2698 {
2699 
2700 	void *object;
2701 	void *next = *head;
2702 	void *old_tail = *tail;
2703 	bool init;
2704 
2705 	if (is_kfence_address(next)) {
2706 		slab_free_hook(s, next, false, false);
2707 		return false;
2708 	}
2709 
2710 	/* Head and tail of the reconstructed freelist */
2711 	*head = NULL;
2712 	*tail = NULL;
2713 
2714 	init = slab_want_init_on_free(s);
2715 
2716 	do {
2717 		object = next;
2718 		next = get_freepointer(s, object);
2719 
2720 		/* If object's reuse doesn't have to be delayed */
2721 		if (likely(slab_free_hook(s, object, init, false))) {
2722 			/* Move object to the new freelist */
2723 			set_freepointer(s, object, *head);
2724 			*head = object;
2725 			if (!*tail)
2726 				*tail = object;
2727 		} else {
2728 			/*
2729 			 * Adjust the reconstructed freelist depth
2730 			 * accordingly if object's reuse is delayed.
2731 			 */
2732 			--(*cnt);
2733 		}
2734 	} while (object != old_tail);
2735 
2736 	return *head != NULL;
2737 }
2738 
2739 static void *setup_object(struct kmem_cache *s, void *object)
2740 {
2741 	setup_object_debug(s, object);
2742 	object = kasan_init_slab_obj(s, object);
2743 	if (unlikely(s->ctor)) {
2744 		kasan_unpoison_new_object(s, object);
2745 		s->ctor(object);
2746 		kasan_poison_new_object(s, object);
2747 	}
2748 	return object;
2749 }
2750 
2751 static struct slab_sheaf *__alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp,
2752 					      unsigned int capacity)
2753 {
2754 	struct slab_sheaf *sheaf;
2755 	size_t sheaf_size;
2756 
2757 	if (gfp & __GFP_NO_OBJ_EXT)
2758 		return NULL;
2759 
2760 	gfp &= ~OBJCGS_CLEAR_MASK;
2761 
2762 	/*
2763 	 * Prevent recursion to the same cache, or a deep stack of kmallocs of
2764 	 * varying sizes (sheaf capacity might differ for each kmalloc size
2765 	 * bucket)
2766 	 */
2767 	if (s->flags & SLAB_KMALLOC)
2768 		gfp |= __GFP_NO_OBJ_EXT;
2769 
2770 	sheaf_size = struct_size(sheaf, objects, capacity);
2771 	sheaf = kzalloc(sheaf_size, gfp);
2772 
2773 	if (unlikely(!sheaf))
2774 		return NULL;
2775 
2776 	sheaf->cache = s;
2777 
2778 	stat(s, SHEAF_ALLOC);
2779 
2780 	return sheaf;
2781 }
2782 
2783 static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
2784 						   gfp_t gfp)
2785 {
2786 	return __alloc_empty_sheaf(s, gfp, s->sheaf_capacity);
2787 }
2788 
2789 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
2790 {
2791 	/*
2792 	 * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
2793 	 * corresponding extension is NULL and alloc_tag_sub() will throw a
2794 	 * warning, therefore replace NULL with CODETAG_EMPTY to indicate
2795 	 * that the extension for this sheaf is expected to be NULL.
2796 	 */
2797 	if (s->flags & SLAB_KMALLOC)
2798 		mark_obj_codetag_empty(sheaf);
2799 
2800 	kfree(sheaf);
2801 
2802 	stat(s, SHEAF_FREE);
2803 }
2804 
2805 static unsigned int
2806 refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
2807 	       unsigned int max);
2808 
2809 static int refill_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf,
2810 			 gfp_t gfp)
2811 {
2812 	int to_fill = s->sheaf_capacity - sheaf->size;
2813 	int filled;
2814 
2815 	if (!to_fill)
2816 		return 0;
2817 
2818 	filled = refill_objects(s, &sheaf->objects[sheaf->size], gfp, to_fill,
2819 				to_fill);
2820 
2821 	sheaf->size += filled;
2822 
2823 	stat_add(s, SHEAF_REFILL, filled);
2824 
2825 	if (filled < to_fill)
2826 		return -ENOMEM;
2827 
2828 	return 0;
2829 }
2830 
2831 
2832 static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
2833 {
2834 	struct slab_sheaf *sheaf = alloc_empty_sheaf(s, gfp);
2835 
2836 	if (!sheaf)
2837 		return NULL;
2838 
2839 	if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
2840 		free_empty_sheaf(s, sheaf);
2841 		return NULL;
2842 	}
2843 
2844 	return sheaf;
2845 }
2846 
2847 /*
2848  * Maximum number of objects freed during a single flush of main pcs sheaf.
2849  * Translates directly to an on-stack array size.
2850  */
2851 #define PCS_BATCH_MAX	32U
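
/* With 8-byte pointers, one batch thus occupies 32 * 8 == 256 bytes of stack. */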
2852 
2853 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
2854 
2855 /*
2856  * Free all objects from the main sheaf. In order to perform
2857  * __kmem_cache_free_bulk() outside of cpu_sheaves->lock, work in batches where
2858  * object pointers are moved to an on-stack array under the lock. To bound the
2859  * stack usage, limit each batch to PCS_BATCH_MAX.
2860  *
2861  * Must be called with s->cpu_sheaves->lock locked, returns with the lock
2862  * unlocked.
2863  *
2864  * Returns how many objects are remaining to be flushed
2865  */
2866 static unsigned int __sheaf_flush_main_batch(struct kmem_cache *s)
2867 {
2868 	struct slub_percpu_sheaves *pcs;
2869 	unsigned int batch, remaining;
2870 	void *objects[PCS_BATCH_MAX];
2871 	struct slab_sheaf *sheaf;
2872 
2873 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
2874 
2875 	pcs = this_cpu_ptr(s->cpu_sheaves);
2876 	sheaf = pcs->main;
2877 
2878 	batch = min(PCS_BATCH_MAX, sheaf->size);
2879 
2880 	sheaf->size -= batch;
2881 	memcpy(objects, sheaf->objects + sheaf->size, batch * sizeof(void *));
2882 
2883 	remaining = sheaf->size;
2884 
2885 	local_unlock(&s->cpu_sheaves->lock);
2886 
2887 	__kmem_cache_free_bulk(s, batch, &objects[0]);
2888 
2889 	stat_add(s, SHEAF_FLUSH, batch);
2890 
2891 	return remaining;
2892 }
2893 
2894 static void sheaf_flush_main(struct kmem_cache *s)
2895 {
2896 	unsigned int remaining;
2897 
2898 	do {
2899 		local_lock(&s->cpu_sheaves->lock);
2900 
2901 		remaining = __sheaf_flush_main_batch(s);
2902 
2903 	} while (remaining);
2904 }
2905 
2906 /*
2907  * Returns true if the main sheaf was at least partially flushed.
2908  */
2909 static bool sheaf_try_flush_main(struct kmem_cache *s)
2910 {
2911 	unsigned int remaining;
2912 	bool ret = false;
2913 
2914 	do {
2915 		if (!local_trylock(&s->cpu_sheaves->lock))
2916 			return ret;
2917 
2918 		ret = true;
2919 		remaining = __sheaf_flush_main_batch(s);
2920 
2921 	} while (remaining);
2922 
2923 	return ret;
2924 }
2925 
2926 /*
2927  * Free all objects from a sheaf that's unused, i.e. not linked to any
2928  * cpu_sheaves, so we need no locking and batching. The locking is also not
2929  * necessary when flushing cpu's sheaves (both spare and main) during cpu
2930  * hotremove as the cpu is not executing anymore.
2931  */
2932 static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf)
2933 {
2934 	if (!sheaf->size)
2935 		return;
2936 
2937 	stat_add(s, SHEAF_FLUSH, sheaf->size);
2938 
2939 	__kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
2940 
2941 	sheaf->size = 0;
2942 }
2943 
2944 static bool __rcu_free_sheaf_prepare(struct kmem_cache *s,
2945 				     struct slab_sheaf *sheaf)
2946 {
2947 	bool init = slab_want_init_on_free(s);
2948 	void **p = &sheaf->objects[0];
2949 	unsigned int i = 0;
2950 	bool pfmemalloc = false;
2951 
2952 	while (i < sheaf->size) {
2953 		struct slab *slab = virt_to_slab(p[i]);
2954 
2955 		memcg_slab_free_hook(s, slab, p + i, 1);
2956 		alloc_tagging_slab_free_hook(s, slab, p + i, 1);
2957 
2958 		if (unlikely(!slab_free_hook(s, p[i], init, true))) {
2959 			p[i] = p[--sheaf->size];
2960 			continue;
2961 		}
2962 
2963 		if (slab_test_pfmemalloc(slab))
2964 			pfmemalloc = true;
2965 
2966 		i++;
2967 	}
2968 
2969 	return pfmemalloc;
2970 }
2971 
2972 static void rcu_free_sheaf_nobarn(struct rcu_head *head)
2973 {
2974 	struct slab_sheaf *sheaf;
2975 	struct kmem_cache *s;
2976 
2977 	sheaf = container_of(head, struct slab_sheaf, rcu_head);
2978 	s = sheaf->cache;
2979 
2980 	__rcu_free_sheaf_prepare(s, sheaf);
2981 
2982 	sheaf_flush_unused(s, sheaf);
2983 
2984 	free_empty_sheaf(s, sheaf);
2985 }
2986 
2987 /*
2988  * Caller needs to make sure migration is disabled in order to fully flush
2989  * a single cpu's sheaves
2990  *
2991  * must not be called from an irq
2992  *
2993  * flushing operations are rare so let's keep it simple and flush to slabs
2994  * directly, skipping the barn
2995  */
2996 static void pcs_flush_all(struct kmem_cache *s)
2997 {
2998 	struct slub_percpu_sheaves *pcs;
2999 	struct slab_sheaf *spare, *rcu_free;
3000 
3001 	local_lock(&s->cpu_sheaves->lock);
3002 	pcs = this_cpu_ptr(s->cpu_sheaves);
3003 
3004 	spare = pcs->spare;
3005 	pcs->spare = NULL;
3006 
3007 	rcu_free = pcs->rcu_free;
3008 	pcs->rcu_free = NULL;
3009 
3010 	local_unlock(&s->cpu_sheaves->lock);
3011 
3012 	if (spare) {
3013 		sheaf_flush_unused(s, spare);
3014 		free_empty_sheaf(s, spare);
3015 	}
3016 
3017 	if (rcu_free)
3018 		call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
3019 
3020 	sheaf_flush_main(s);
3021 }
3022 
3023 static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu)
3024 {
3025 	struct slub_percpu_sheaves *pcs;
3026 
3027 	pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3028 
3029 	/* The cpu is not executing anymore so we don't need pcs->lock */
3030 	sheaf_flush_unused(s, pcs->main);
3031 	if (pcs->spare) {
3032 		sheaf_flush_unused(s, pcs->spare);
3033 		free_empty_sheaf(s, pcs->spare);
3034 		pcs->spare = NULL;
3035 	}
3036 
3037 	if (pcs->rcu_free) {
3038 		call_rcu(&pcs->rcu_free->rcu_head, rcu_free_sheaf_nobarn);
3039 		pcs->rcu_free = NULL;
3040 	}
3041 }
3042 
3043 static void pcs_destroy(struct kmem_cache *s)
3044 {
3045 	int cpu;
3046 
3047 	/*
3048 	 * We may be unwinding cache creation that failed before or during the
3049 	 * allocation of this.
3050 	 */
3051 	if (!s->cpu_sheaves)
3052 		return;
3053 
3054 	/* pcs->main can only point to the bootstrap sheaf, nothing to free */
3055 	if (!cache_has_sheaves(s))
3056 		goto free_pcs;
3057 
3058 	for_each_possible_cpu(cpu) {
3059 		struct slub_percpu_sheaves *pcs;
3060 
3061 		pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3062 
3063 		/* This can happen when unwinding failed cache creation. */
3064 		if (!pcs->main)
3065 			continue;
3066 
3067 		/*
3068 		 * We have already passed __kmem_cache_shutdown() so everything
3069 		 * was flushed and there should be no objects allocated from
3070 		 * slabs, otherwise kmem_cache_destroy() would have aborted.
3071 		 * Therefore something would have to be really wrong if the
3072 		 * warnings here trigger, and we should rather leave objects and
3073 		 * sheaves to leak in that case.
3074 		 */
3075 
3076 		WARN_ON(pcs->spare);
3077 		WARN_ON(pcs->rcu_free);
3078 
3079 		if (!WARN_ON(pcs->main->size)) {
3080 			free_empty_sheaf(s, pcs->main);
3081 			pcs->main = NULL;
3082 		}
3083 	}
3084 
3085 free_pcs:
3086 	free_percpu(s->cpu_sheaves);
3087 	s->cpu_sheaves = NULL;
3088 }
3089 
3090 static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn,
3091 					       bool allow_spin)
3092 {
3093 	struct slab_sheaf *empty = NULL;
3094 	unsigned long flags;
3095 
3096 	if (!data_race(barn->nr_empty))
3097 		return NULL;
3098 
3099 	if (likely(allow_spin))
3100 		spin_lock_irqsave(&barn->lock, flags);
3101 	else if (!spin_trylock_irqsave(&barn->lock, flags))
3102 		return NULL;
3103 
3104 	if (likely(barn->nr_empty)) {
3105 		empty = list_first_entry(&barn->sheaves_empty,
3106 					 struct slab_sheaf, barn_list);
3107 		list_del(&empty->barn_list);
3108 		barn->nr_empty--;
3109 	}
3110 
3111 	spin_unlock_irqrestore(&barn->lock, flags);
3112 
3113 	return empty;
3114 }
3115 
3116 /*
3117  * The following two functions are used mainly in cases where we have to undo an
3118  * intended action due to a race or cpu migration. Thus they do not check the
3119  * empty or full sheaf limits for simplicity.
3120  */
3121 
3122 static void barn_put_empty_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
3123 {
3124 	unsigned long flags;
3125 
3126 	spin_lock_irqsave(&barn->lock, flags);
3127 
3128 	list_add(&sheaf->barn_list, &barn->sheaves_empty);
3129 	barn->nr_empty++;
3130 
3131 	spin_unlock_irqrestore(&barn->lock, flags);
3132 }
3133 
3134 static void barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
3135 {
3136 	unsigned long flags;
3137 
3138 	spin_lock_irqsave(&barn->lock, flags);
3139 
3140 	list_add(&sheaf->barn_list, &barn->sheaves_full);
3141 	barn->nr_full++;
3142 
3143 	spin_unlock_irqrestore(&barn->lock, flags);
3144 }
3145 
3146 static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
3147 {
3148 	struct slab_sheaf *sheaf = NULL;
3149 	unsigned long flags;
3150 
3151 	if (!data_race(barn->nr_full) && !data_race(barn->nr_empty))
3152 		return NULL;
3153 
3154 	spin_lock_irqsave(&barn->lock, flags);
3155 
3156 	if (barn->nr_full) {
3157 		sheaf = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
3158 					barn_list);
3159 		list_del(&sheaf->barn_list);
3160 		barn->nr_full--;
3161 	} else if (barn->nr_empty) {
3162 		sheaf = list_first_entry(&barn->sheaves_empty,
3163 					 struct slab_sheaf, barn_list);
3164 		list_del(&sheaf->barn_list);
3165 		barn->nr_empty--;
3166 	}
3167 
3168 	spin_unlock_irqrestore(&barn->lock, flags);
3169 
3170 	return sheaf;
3171 }
3172 
3173 /*
3174  * If a full sheaf is available, return it and put the supplied empty one to
3175  * barn. We ignore the limit on empty sheaves as the number of sheaves doesn't
3176  * change.
3177  */
3178 static struct slab_sheaf *
3179 barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty,
3180 			 bool allow_spin)
3181 {
3182 	struct slab_sheaf *full = NULL;
3183 	unsigned long flags;
3184 
3185 	if (!data_race(barn->nr_full))
3186 		return NULL;
3187 
3188 	if (likely(allow_spin))
3189 		spin_lock_irqsave(&barn->lock, flags);
3190 	else if (!spin_trylock_irqsave(&barn->lock, flags))
3191 		return NULL;
3192 
3193 	if (likely(barn->nr_full)) {
3194 		full = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
3195 					barn_list);
3196 		list_del(&full->barn_list);
3197 		list_add(&empty->barn_list, &barn->sheaves_empty);
3198 		barn->nr_full--;
3199 		barn->nr_empty++;
3200 	}
3201 
3202 	spin_unlock_irqrestore(&barn->lock, flags);
3203 
3204 	return full;
3205 }
3206 
3207 /*
3208  * If an empty sheaf is available, return it and put the supplied full one to
3209  * barn. But if there are too many full sheaves, reject this with -E2BIG.
3210  */
3211 static struct slab_sheaf *
3212 barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full,
3213 			bool allow_spin)
3214 {
3215 	struct slab_sheaf *empty;
3216 	unsigned long flags;
3217 
3218 	/* we don't repeat this check under barn->lock as it's not critical */
3219 	if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES)
3220 		return ERR_PTR(-E2BIG);
3221 	if (!data_race(barn->nr_empty))
3222 		return ERR_PTR(-ENOMEM);
3223 
3224 	if (likely(allow_spin))
3225 		spin_lock_irqsave(&barn->lock, flags);
3226 	else if (!spin_trylock_irqsave(&barn->lock, flags))
3227 		return ERR_PTR(-EBUSY);
3228 
3229 	if (likely(barn->nr_empty)) {
3230 		empty = list_first_entry(&barn->sheaves_empty, struct slab_sheaf,
3231 					 barn_list);
3232 		list_del(&empty->barn_list);
3233 		list_add(&full->barn_list, &barn->sheaves_full);
3234 		barn->nr_empty--;
3235 		barn->nr_full++;
3236 	} else {
3237 		empty = ERR_PTR(-ENOMEM);
3238 	}
3239 
3240 	spin_unlock_irqrestore(&barn->lock, flags);
3241 
3242 	return empty;
3243 }
3244 
3245 static void barn_init(struct node_barn *barn)
3246 {
3247 	spin_lock_init(&barn->lock);
3248 	INIT_LIST_HEAD(&barn->sheaves_full);
3249 	INIT_LIST_HEAD(&barn->sheaves_empty);
3250 	barn->nr_full = 0;
3251 	barn->nr_empty = 0;
3252 }
3253 
3254 static void barn_shrink(struct kmem_cache *s, struct node_barn *barn)
3255 {
3256 	LIST_HEAD(empty_list);
3257 	LIST_HEAD(full_list);
3258 	struct slab_sheaf *sheaf, *sheaf2;
3259 	unsigned long flags;
3260 
3261 	spin_lock_irqsave(&barn->lock, flags);
3262 
3263 	list_splice_init(&barn->sheaves_full, &full_list);
3264 	barn->nr_full = 0;
3265 	list_splice_init(&barn->sheaves_empty, &empty_list);
3266 	barn->nr_empty = 0;
3267 
3268 	spin_unlock_irqrestore(&barn->lock, flags);
3269 
3270 	list_for_each_entry_safe(sheaf, sheaf2, &full_list, barn_list) {
3271 		sheaf_flush_unused(s, sheaf);
3272 		free_empty_sheaf(s, sheaf);
3273 	}
3274 
3275 	list_for_each_entry_safe(sheaf, sheaf2, &empty_list, barn_list)
3276 		free_empty_sheaf(s, sheaf);
3277 }
3278 
3279 /*
3280  * Slab allocation and freeing
3281  */
3282 static inline struct slab *alloc_slab_page(gfp_t flags, int node,
3283 					   struct kmem_cache_order_objects oo,
3284 					   bool allow_spin)
3285 {
3286 	struct page *page;
3287 	struct slab *slab;
3288 	unsigned int order = oo_order(oo);
3289 
3290 	if (unlikely(!allow_spin))
3291 		page = alloc_frozen_pages_nolock(0/* __GFP_COMP is implied */,
3292 								  node, order);
3293 	else if (node == NUMA_NO_NODE)
3294 		page = alloc_frozen_pages(flags, order);
3295 	else
3296 		page = __alloc_frozen_pages(flags, order, node, NULL);
3297 
3298 	if (!page)
3299 		return NULL;
3300 
3301 	__SetPageSlab(page);
3302 	slab = page_slab(page);
3303 	if (page_is_pfmemalloc(page))
3304 		slab_set_pfmemalloc(slab);
3305 
3306 	return slab;
3307 }
3308 
3309 #ifdef CONFIG_SLAB_FREELIST_RANDOM
3310 /* Pre-initialize the random sequence cache */
3311 static int init_cache_random_seq(struct kmem_cache *s)
3312 {
3313 	unsigned int count = oo_objects(s->oo);
3314 	int err;
3315 
3316 	/* Bailout if already initialised */
3317 	if (s->random_seq)
3318 		return 0;
3319 
3320 	err = cache_random_seq_create(s, count, GFP_KERNEL);
3321 	if (err) {
3322 		pr_err("SLUB: Unable to initialize free list for %s\n",
3323 			s->name);
3324 		return err;
3325 	}
3326 
3327 	/* Transform to an offset on the set of pages */
3328 	if (s->random_seq) {
3329 		unsigned int i;
3330 
3331 		for (i = 0; i < count; i++)
3332 			s->random_seq[i] *= s->size;
3333 	}
3334 	return 0;
3335 }
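
/*
 * Illustrative example: for a cache with s->size == 64 and a generated
 * sequence {3, 0, 2, 1}, the loop above rescales it to byte offsets
 * {192, 0, 128, 64}, which next_freelist_entry() adds directly to the
 * slab's base address.
 */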
3336 
3337 /* Initialize each random sequence freelist per cache */
3338 static void __init init_freelist_randomization(void)
3339 {
3340 	struct kmem_cache *s;
3341 
3342 	mutex_lock(&slab_mutex);
3343 
3344 	list_for_each_entry(s, &slab_caches, list)
3345 		init_cache_random_seq(s);
3346 
3347 	mutex_unlock(&slab_mutex);
3348 }
3349 
3350 /* Get the next entry on the pre-computed freelist randomized */
3351 static void *next_freelist_entry(struct kmem_cache *s,
3352 				unsigned long *pos, void *start,
3353 				unsigned long page_limit,
3354 				unsigned long freelist_count)
3355 {
3356 	unsigned int idx;
3357 
3358 	/*
3359 	 * If the target page allocation failed, the number of objects on the
3360 	 * page might be smaller than the usual size defined by the cache.
3361 	 */
3362 	do {
3363 		idx = s->random_seq[*pos];
3364 		*pos += 1;
3365 		if (*pos >= freelist_count)
3366 			*pos = 0;
3367 	} while (unlikely(idx >= page_limit));
3368 
3369 	return (char *)start + idx;
3370 }
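
/*
 * Worked example (illustrative values only): with s->size == 256, a
 * precomputed sequence of {3, 1, 0, 2} is pre-scaled by init_cache_random_seq()
 * to byte offsets {768, 256, 0, 512}. Starting at *pos == 0, successive calls
 * return start + 768, start + 256, start and start + 512, wrapping *pos at
 * freelist_count and skipping offsets >= page_limit when the slab ended up
 * smaller than s->oo.
 */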
3371 
3372 static DEFINE_PER_CPU(struct rnd_state, slab_rnd_state);
3373 
3374 /* Shuffle the single linked freelist based on a random pre-computed sequence */
3375 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
3376 			     bool allow_spin)
3377 {
3378 	void *start;
3379 	void *cur;
3380 	void *next;
3381 	unsigned long idx, pos, page_limit, freelist_count;
3382 
3383 	if (slab->objects < 2 || !s->random_seq)
3384 		return false;
3385 
3386 	freelist_count = oo_objects(s->oo);
3387 	if (allow_spin) {
3388 		pos = get_random_u32_below(freelist_count);
3389 	} else {
3390 		struct rnd_state *state;
3391 
3392 		/*
3393 		 * An interrupt or NMI handler might interrupt and change
3394 		 * the state in the middle, but that's safe.
3395 		 */
3396 		state = &get_cpu_var(slab_rnd_state);
3397 		pos = prandom_u32_state(state) % freelist_count;
3398 		put_cpu_var(slab_rnd_state);
3399 	}
3400 
3401 	page_limit = slab->objects * s->size;
3402 	start = fixup_red_left(s, slab_address(slab));
3403 
3404 	/* First entry is used as the base of the freelist */
3405 	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
3406 	cur = setup_object(s, cur);
3407 	slab->freelist = cur;
3408 
3409 	for (idx = 1; idx < slab->objects; idx++) {
3410 		next = next_freelist_entry(s, &pos, start, page_limit,
3411 			freelist_count);
3412 		next = setup_object(s, next);
3413 		set_freepointer(s, cur, next);
3414 		cur = next;
3415 	}
3416 	set_freepointer(s, cur, NULL);
3417 
3418 	return true;
3419 }
3420 #else
3421 static inline int init_cache_random_seq(struct kmem_cache *s)
3422 {
3423 	return 0;
3424 }
3425 static inline void init_freelist_randomization(void) { }
3426 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
3427 				    bool allow_spin)
3428 {
3429 	return false;
3430 }
3431 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
3432 
3433 static __always_inline void account_slab(struct slab *slab, int order,
3434 					 struct kmem_cache *s, gfp_t gfp)
3435 {
3436 	if (memcg_kmem_online() &&
3437 			(s->flags & SLAB_ACCOUNT) &&
3438 			!slab_obj_exts(slab))
3439 		alloc_slab_obj_exts(slab, s, gfp, true);
3440 
3441 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
3442 			    PAGE_SIZE << order);
3443 }
3444 
3445 static __always_inline void unaccount_slab(struct slab *slab, int order,
3446 					   struct kmem_cache *s, bool allow_spin)
3447 {
3448 	/*
3449 	 * The slab object extensions should now be freed regardless of
3450 	 * whether mem_alloc_profiling_enabled() or not because profiling
3451 	 * might have been disabled after slab->obj_exts got allocated.
3452 	 */
3453 	free_slab_obj_exts(slab, allow_spin);
3454 
3455 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
3456 			    -(PAGE_SIZE << order));
3457 }
3458 
3459 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
3460 {
3461 	bool allow_spin = gfpflags_allow_spinning(flags);
3462 	struct slab *slab;
3463 	struct kmem_cache_order_objects oo = s->oo;
3464 	gfp_t alloc_gfp;
3465 	void *start, *p, *next;
3466 	int idx;
3467 	bool shuffle;
3468 
3469 	flags &= gfp_allowed_mask;
3470 
3471 	flags |= s->allocflags;
3472 
3473 	/*
3474 	 * Let the initial higher-order allocation fail under memory pressure
3475 	 * so we fall back to the minimum order allocation.
3476 	 */
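	/*
	 * For example, a GFP_KERNEL request with oo_order(oo) > oo_order(s->min)
	 * first tries (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY |
	 * __GFP_NOMEMALLOC) with __GFP_RECLAIM cleared, so it neither enters
	 * direct reclaim nor wakes kswapd; only the s->min order fallback below
	 * retries with the original flags.
	 */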
3477 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
3478 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
3479 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
3480 
3481 	/*
3482 	 * __GFP_RECLAIM could be cleared on the first allocation attempt,
3483 	 * so pass allow_spin flag directly.
3484 	 */
3485 	slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
3486 	if (unlikely(!slab)) {
3487 		oo = s->min;
3488 		alloc_gfp = flags;
3489 		/*
3490 		 * Allocation may have failed due to fragmentation.
3491 		 * Try a lower order alloc if possible
3492 		 */
3493 		slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
3494 		if (unlikely(!slab))
3495 			return NULL;
3496 		stat(s, ORDER_FALLBACK);
3497 	}
3498 
3499 	slab->objects = oo_objects(oo);
3500 	slab->inuse = 0;
3501 	slab->frozen = 0;
3502 
3503 	slab->slab_cache = s;
3504 
3505 	kasan_poison_slab(slab);
3506 
3507 	start = slab_address(slab);
3508 
3509 	setup_slab_debug(s, slab, start);
3510 	init_slab_obj_exts(slab);
3511 	/*
3512 	 * Poison the slab before initializing the slabobj_ext array
3513 	 * to prevent the array from being overwritten.
3514 	 */
3515 	alloc_slab_obj_exts_early(s, slab);
3516 	account_slab(slab, oo_order(oo), s, flags);
3517 
3518 	shuffle = shuffle_freelist(s, slab, allow_spin);
3519 
3520 	if (!shuffle) {
3521 		start = fixup_red_left(s, start);
3522 		start = setup_object(s, start);
3523 		slab->freelist = start;
3524 		for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
3525 			next = p + s->size;
3526 			next = setup_object(s, next);
3527 			set_freepointer(s, p, next);
3528 			p = next;
3529 		}
3530 		set_freepointer(s, p, NULL);
3531 	}
3532 
3533 	return slab;
3534 }
3535 
3536 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
3537 {
3538 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
3539 		flags = kmalloc_fix_flags(flags);
3540 
3541 	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
3542 
3543 	return allocate_slab(s,
3544 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
3545 }
3546 
3547 static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin)
3548 {
3549 	struct page *page = slab_page(slab);
3550 	int order = compound_order(page);
3551 	int pages = 1 << order;
3552 
3553 	__slab_clear_pfmemalloc(slab);
3554 	page->mapping = NULL;
3555 	__ClearPageSlab(page);
3556 	mm_account_reclaimed_pages(pages);
3557 	unaccount_slab(slab, order, s, allow_spin);
3558 	if (allow_spin)
3559 		free_frozen_pages(page, order);
3560 	else
3561 		free_frozen_pages_nolock(page, order);
3562 }
3563 
3564 static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
3565 {
3566 	/*
3567 	 * Since it was just allocated, we can skip the actions in
3568 	 * discard_slab() and free_slab().
3569 	 */
3570 	__free_slab(s, slab, false);
3571 }
3572 
3573 static void rcu_free_slab(struct rcu_head *h)
3574 {
3575 	struct slab *slab = container_of(h, struct slab, rcu_head);
3576 
3577 	__free_slab(slab->slab_cache, slab, true);
3578 }
3579 
3580 static void free_slab(struct kmem_cache *s, struct slab *slab)
3581 {
3582 	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
3583 		void *p;
3584 
3585 		slab_pad_check(s, slab);
3586 		for_each_object(p, s, slab_address(slab), slab->objects)
3587 			check_object(s, slab, p, SLUB_RED_INACTIVE);
3588 	}
3589 
3590 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
3591 		call_rcu(&slab->rcu_head, rcu_free_slab);
3592 	else
3593 		__free_slab(s, slab, true);
3594 }
3595 
3596 static void discard_slab(struct kmem_cache *s, struct slab *slab)
3597 {
3598 	dec_slabs_node(s, slab_nid(slab), slab->objects);
3599 	free_slab(s, slab);
3600 }
3601 
3602 static inline bool slab_test_node_partial(const struct slab *slab)
3603 {
3604 	return test_bit(SL_partial, &slab->flags.f);
3605 }
3606 
3607 static inline void slab_set_node_partial(struct slab *slab)
3608 {
3609 	set_bit(SL_partial, &slab->flags.f);
3610 }
3611 
3612 static inline void slab_clear_node_partial(struct slab *slab)
3613 {
3614 	clear_bit(SL_partial, &slab->flags.f);
3615 }
3616 
3617 /*
3618  * Management of partially allocated slabs.
3619  */
3620 static inline void
3621 __add_partial(struct kmem_cache_node *n, struct slab *slab, enum add_mode mode)
3622 {
3623 	n->nr_partial++;
3624 	if (mode == ADD_TO_TAIL)
3625 		list_add_tail(&slab->slab_list, &n->partial);
3626 	else
3627 		list_add(&slab->slab_list, &n->partial);
3628 	slab_set_node_partial(slab);
3629 }
3630 
3631 static inline void add_partial(struct kmem_cache_node *n,
3632 				struct slab *slab, enum add_mode mode)
3633 {
3634 	lockdep_assert_held(&n->list_lock);
3635 	__add_partial(n, slab, mode);
3636 }
3637 
3638 static inline void remove_partial(struct kmem_cache_node *n,
3639 					struct slab *slab)
3640 {
3641 	lockdep_assert_held(&n->list_lock);
3642 	list_del(&slab->slab_list);
3643 	slab_clear_node_partial(slab);
3644 	n->nr_partial--;
3645 }
3646 
3647 /*
3648  * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
3649  * slab from the n->partial list. Remove only a single object from the slab, do
3650  * the alloc_debug_processing() checks and leave the slab on the list, or move
3651  * it to full list if it was the last free object.
3652  */
3653 static void *alloc_single_from_partial(struct kmem_cache *s,
3654 		struct kmem_cache_node *n, struct slab *slab, int orig_size)
3655 {
3656 	void *object;
3657 
3658 	lockdep_assert_held(&n->list_lock);
3659 
3660 #ifdef CONFIG_SLUB_DEBUG
3661 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3662 		if (!validate_slab_ptr(slab)) {
3663 			slab_err(s, slab, "Not a valid slab page");
3664 			return NULL;
3665 		}
3666 	}
3667 #endif
3668 
3669 	object = slab->freelist;
3670 	slab->freelist = get_freepointer(s, object);
3671 	slab->inuse++;
3672 
3673 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
3674 		remove_partial(n, slab);
3675 		return NULL;
3676 	}
3677 
3678 	if (slab->inuse == slab->objects) {
3679 		remove_partial(n, slab);
3680 		add_full(s, n, slab);
3681 	}
3682 
3683 	return object;
3684 }
3685 
3686 /*
3687  * Called only for kmem_cache_debug() caches to allocate from a freshly
3688  * allocated slab. Allocate a single object instead of the whole freelist
3689  * and put the slab on the partial (or full) list.
3690  */
3691 static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
3692 					int orig_size, gfp_t gfpflags)
3693 {
3694 	bool allow_spin = gfpflags_allow_spinning(gfpflags);
3695 	int nid = slab_nid(slab);
3696 	struct kmem_cache_node *n = get_node(s, nid);
3697 	unsigned long flags;
3698 	void *object;
3699 
3700 	if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
3701 		/* Unlucky, discard newly allocated slab. */
3702 		free_new_slab_nolock(s, slab);
3703 		return NULL;
3704 	}
3705 
3706 	object = slab->freelist;
3707 	slab->freelist = get_freepointer(s, object);
3708 	slab->inuse = 1;
3709 
3710 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
3711 		/*
3712 		 * It's not really expected that this would fail on a
3713 		 * freshly allocated slab, but a concurrent memory
3714 		 * corruption in theory could cause that.
3715 		 * Leak memory of allocated slab.
3716 		 */
3717 		if (!allow_spin)
3718 			spin_unlock_irqrestore(&n->list_lock, flags);
3719 		return NULL;
3720 	}
3721 
3722 	if (allow_spin)
3723 		spin_lock_irqsave(&n->list_lock, flags);
3724 
3725 	if (slab->inuse == slab->objects)
3726 		add_full(s, n, slab);
3727 	else
3728 		add_partial(n, slab, ADD_TO_HEAD);
3729 
3730 	inc_slabs_node(s, nid, slab->objects);
3731 	spin_unlock_irqrestore(&n->list_lock, flags);
3732 
3733 	return object;
3734 }
3735 
3736 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
3737 
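/*
 * Grab several slabs from the node partial list at once, aiming for at least
 * pc->min_objects and at most pc->max_objects free objects in total. The per
 * slab free counts are read racily and can only grow due to concurrent
 * freeing, so the caller may receive more objects than requested and must
 * handle that.
 *
 * Illustrative example: with min_objects == 8, max_objects == 16 and partial
 * slabs holding 5, 6 and 7 free objects, the first two slabs (11 objects in
 * total) are taken and the third is skipped, because 11 >= min and 11 + 7
 * would exceed max.
 */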
3738 static bool get_partial_node_bulk(struct kmem_cache *s,
3739 				  struct kmem_cache_node *n,
3740 				  struct partial_bulk_context *pc,
3741 				  bool allow_spin)
3742 {
3743 	struct slab *slab, *slab2;
3744 	unsigned int total_free = 0;
3745 	unsigned long flags;
3746 
3747 	/* Racy check to avoid taking the lock unnecessarily. */
3748 	if (!n || data_race(!n->nr_partial))
3749 		return false;
3750 
3751 	INIT_LIST_HEAD(&pc->slabs);
3752 
3753 	if (allow_spin)
3754 		spin_lock_irqsave(&n->list_lock, flags);
3755 	else if (!spin_trylock_irqsave(&n->list_lock, flags))
3756 		return false;
3757 
3758 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
3759 		struct freelist_counters flc;
3760 		unsigned int slab_free;
3761 
3762 		if (!pfmemalloc_match(slab, pc->flags))
3763 			continue;
3764 
3765 		/*
3766 		 * determine the number of free objects in the slab racily
3767 		 *
3768 		 * slab_free is a lower bound due to possible subsequent
3769 		 * concurrent freeing, so the caller may get more objects than
3770 		 * requested and must handle that
3771 		 */
3772 		flc.counters = data_race(READ_ONCE(slab->counters));
3773 		slab_free = flc.objects - flc.inuse;
3774 
3775 		/* we already have min and taking this slab would put us over max */
3776 		if (total_free >= pc->min_objects
3777 		    && total_free + slab_free > pc->max_objects)
3778 			break;
3779 
3780 		remove_partial(n, slab);
3781 
3782 		list_add(&slab->slab_list, &pc->slabs);
3783 
3784 		total_free += slab_free;
3785 		if (total_free >= pc->max_objects)
3786 			break;
3787 	}
3788 
3789 	spin_unlock_irqrestore(&n->list_lock, flags);
3790 	return total_free > 0;
3791 }
3792 
3793 /*
3794  * Try to allocate an object from a partial slab on a specific node.
3795  */
3796 static void *get_from_partial_node(struct kmem_cache *s,
3797 				   struct kmem_cache_node *n,
3798 				   struct partial_context *pc)
3799 {
3800 	struct slab *slab, *slab2;
3801 	unsigned long flags;
3802 	void *object = NULL;
3803 
3804 	/*
3805 	 * Racy check. If we mistakenly see no partial slabs then we
3806 	 * just allocate an empty slab. If we mistakenly try to get a
3807 	 * partial slab and there is none available then get_from_partial()
3808 	 * will return NULL.
3809 	 */
3810 	if (!n || !n->nr_partial)
3811 		return NULL;
3812 
3813 	if (gfpflags_allow_spinning(pc->flags))
3814 		spin_lock_irqsave(&n->list_lock, flags);
3815 	else if (!spin_trylock_irqsave(&n->list_lock, flags))
3816 		return NULL;
3817 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
3818 
3819 		struct freelist_counters old, new;
3820 
3821 		if (!pfmemalloc_match(slab, pc->flags))
3822 			continue;
3823 
3824 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
3825 			object = alloc_single_from_partial(s, n, slab,
3826 							pc->orig_size);
3827 			if (object)
3828 				break;
3829 			continue;
3830 		}
3831 
3832 		/*
3833 		 * get a single object from the slab. This might race against
3834 		 * __slab_free(), which however has to take the list_lock if
3835 		 * it's about to make the slab fully free.
3836 		 */
3837 		do {
3838 			old.freelist = slab->freelist;
3839 			old.counters = slab->counters;
3840 
3841 			new.freelist = get_freepointer(s, old.freelist);
3842 			new.counters = old.counters;
3843 			new.inuse++;
3844 
3845 		} while (!__slab_update_freelist(s, slab, &old, &new, "get_from_partial_node"));
3846 
3847 		object = old.freelist;
3848 		if (!new.freelist)
3849 			remove_partial(n, slab);
3850 
3851 		break;
3852 	}
3853 	spin_unlock_irqrestore(&n->list_lock, flags);
3854 	return object;
3855 }
3856 
3857 /*
3858  * Get an object from somewhere. Search in increasing NUMA distances.
3859  */
3860 static void *get_from_any_partial(struct kmem_cache *s, struct partial_context *pc)
3861 {
3862 #ifdef CONFIG_NUMA
3863 	struct zonelist *zonelist;
3864 	struct zoneref *z;
3865 	struct zone *zone;
3866 	enum zone_type highest_zoneidx = gfp_zone(pc->flags);
3867 	unsigned int cpuset_mems_cookie;
3868 	bool allow_spin = gfpflags_allow_spinning(pc->flags);
3869 
3870 	/*
3871 	 * The defrag ratio allows a configuration of the tradeoffs between
3872 	 * inter node defragmentation and node local allocations. A lower
3873 	 * defrag_ratio increases the tendency to do local allocations
3874 	 * instead of attempting to obtain partial slabs from other nodes.
3875 	 *
3876 	 * If the defrag_ratio is set to 0 then kmalloc() always
3877 	 * returns node local objects. If the ratio is higher then kmalloc()
3878 	 * may return off node objects because partial slabs are obtained
3879 	 * from other nodes and filled up.
3880 	 *
3881 	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
3882 	 * (which makes defrag_ratio = 1000) then every (well almost)
3883 	 * allocation will first attempt to defrag slab caches on other nodes.
3884 	 * This means scanning over all nodes to look for partial slabs which
3885 	 * may be expensive if we do it every time we are trying to find a slab
3886 	 * with available objects.
3887 	 */
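	/*
	 * Example: writing 50 to the sysfs file stores a ratio of 500, so the
	 * check below skips the remote search whenever get_cycles() % 1024
	 * exceeds 500, i.e. roughly half of the time.
	 */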
3888 	if (!s->remote_node_defrag_ratio ||
3889 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
3890 		return NULL;
3891 
3892 	do {
3893 		/*
3894 		 * read_mems_allowed_begin() accesses current->mems_allowed_seq,
3895 		 * a seqcount_spinlock_t that is not NMI-safe. Do not access
3896 		 * current->mems_allowed_seq and avoid retry when GFP flags
3897 		 * indicate spinning is not allowed.
3898 		 */
3899 		if (allow_spin)
3900 			cpuset_mems_cookie = read_mems_allowed_begin();
3901 
3902 		zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
3903 		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
3904 			struct kmem_cache_node *n;
3905 
3906 			n = get_node(s, zone_to_nid(zone));
3907 
3908 			if (n && cpuset_zone_allowed(zone, pc->flags) &&
3909 					n->nr_partial > s->min_partial) {
3910 
3911 				void *object = get_from_partial_node(s, n, pc);
3912 
3913 				if (object) {
3914 					/*
3915 					 * Don't check read_mems_allowed_retry()
3916 					 * here - if mems_allowed was updated in
3917 					 * parallel, that was a harmless race
3918 					 * between allocation and the cpuset
3919 					 * update
3920 					 */
3921 					return object;
3922 				}
3923 			}
3924 		}
3925 	} while (allow_spin && read_mems_allowed_retry(cpuset_mems_cookie));
3926 #endif	/* CONFIG_NUMA */
3927 	return NULL;
3928 }
3929 
3930 /*
3931  * Get an object from a partial slab
3932  */
3933 static void *get_from_partial(struct kmem_cache *s, int node,
3934 			      struct partial_context *pc)
3935 {
3936 	int searchnode = node;
3937 	void *object;
3938 
3939 	if (node == NUMA_NO_NODE)
3940 		searchnode = numa_mem_id();
3941 
3942 	object = get_from_partial_node(s, get_node(s, searchnode), pc);
3943 	if (object || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
3944 		return object;
3945 
3946 	return get_from_any_partial(s, pc);
3947 }
3948 
3949 static bool has_pcs_used(int cpu, struct kmem_cache *s)
3950 {
3951 	struct slub_percpu_sheaves *pcs;
3952 
3953 	if (!cache_has_sheaves(s))
3954 		return false;
3955 
3956 	pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
3957 
3958 	return (pcs->spare || pcs->rcu_free || pcs->main->size);
3959 }
3960 
3961 /*
3962  * Flush percpu sheaves
3963  *
3964  * Called from CPU work handler with migration disabled.
3965  */
3966 static void flush_cpu_sheaves(struct work_struct *w)
3967 {
3968 	struct kmem_cache *s;
3969 	struct slub_flush_work *sfw;
3970 
3971 	sfw = container_of(w, struct slub_flush_work, work);
3972 
3973 	s = sfw->s;
3974 
3975 	if (cache_has_sheaves(s))
3976 		pcs_flush_all(s);
3977 }
3978 
3979 static void flush_all_cpus_locked(struct kmem_cache *s)
3980 {
3981 	struct slub_flush_work *sfw;
3982 	unsigned int cpu;
3983 
3984 	lockdep_assert_cpus_held();
3985 	mutex_lock(&flush_lock);
3986 
3987 	for_each_online_cpu(cpu) {
3988 		sfw = &per_cpu(slub_flush, cpu);
3989 		if (!has_pcs_used(cpu, s)) {
3990 			sfw->skip = true;
3991 			continue;
3992 		}
3993 		INIT_WORK(&sfw->work, flush_cpu_sheaves);
3994 		sfw->skip = false;
3995 		sfw->s = s;
3996 		queue_work_on(cpu, flushwq, &sfw->work);
3997 	}
3998 
3999 	for_each_online_cpu(cpu) {
4000 		sfw = &per_cpu(slub_flush, cpu);
4001 		if (sfw->skip)
4002 			continue;
4003 		flush_work(&sfw->work);
4004 	}
4005 
4006 	mutex_unlock(&flush_lock);
4007 }
4008 
4009 static void flush_all(struct kmem_cache *s)
4010 {
4011 	cpus_read_lock();
4012 	flush_all_cpus_locked(s);
4013 	cpus_read_unlock();
4014 }
4015 
4016 static void flush_rcu_sheaf(struct work_struct *w)
4017 {
4018 	struct slub_percpu_sheaves *pcs;
4019 	struct slab_sheaf *rcu_free;
4020 	struct slub_flush_work *sfw;
4021 	struct kmem_cache *s;
4022 
4023 	sfw = container_of(w, struct slub_flush_work, work);
4024 	s = sfw->s;
4025 
4026 	local_lock(&s->cpu_sheaves->lock);
4027 	pcs = this_cpu_ptr(s->cpu_sheaves);
4028 
4029 	rcu_free = pcs->rcu_free;
4030 	pcs->rcu_free = NULL;
4031 
4032 	local_unlock(&s->cpu_sheaves->lock);
4033 
4034 	if (rcu_free)
4035 		call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
4036 }
4037 
4038 
4039 /* needed for kvfree_rcu_barrier() */
4040 void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
4041 {
4042 	struct slub_flush_work *sfw;
4043 	unsigned int cpu;
4044 
4045 	mutex_lock(&flush_lock);
4046 
4047 	for_each_online_cpu(cpu) {
4048 		sfw = &per_cpu(slub_flush, cpu);
4049 
4050 		/*
4051 		 * we don't check if rcu_free sheaf exists - racing
4052 		 * __kfree_rcu_sheaf() might have just removed it.
4053 		 * by executing flush_rcu_sheaf() on the cpu we make
4054 		 * sure the __kfree_rcu_sheaf() finished its call_rcu()
4055 		 */
4056 
4057 		INIT_WORK(&sfw->work, flush_rcu_sheaf);
4058 		sfw->s = s;
4059 		queue_work_on(cpu, flushwq, &sfw->work);
4060 	}
4061 
4062 	for_each_online_cpu(cpu) {
4063 		sfw = &per_cpu(slub_flush, cpu);
4064 		flush_work(&sfw->work);
4065 	}
4066 
4067 	mutex_unlock(&flush_lock);
4068 }
4069 
4070 void flush_all_rcu_sheaves(void)
4071 {
4072 	struct kmem_cache *s;
4073 
4074 	cpus_read_lock();
4075 	mutex_lock(&slab_mutex);
4076 
4077 	list_for_each_entry(s, &slab_caches, list) {
4078 		if (!cache_has_sheaves(s))
4079 			continue;
4080 		flush_rcu_sheaves_on_cache(s);
4081 	}
4082 
4083 	mutex_unlock(&slab_mutex);
4084 	cpus_read_unlock();
4085 
4086 	rcu_barrier();
4087 }
4088 
4089 /*
4090  * Use the cpu hotplug notifier to ensure that the percpu sheaves are flushed
4091  * when necessary.
4092  */
4093 static int slub_cpu_dead(unsigned int cpu)
4094 {
4095 	struct kmem_cache *s;
4096 
4097 	mutex_lock(&slab_mutex);
4098 	list_for_each_entry(s, &slab_caches, list) {
4099 		if (cache_has_sheaves(s))
4100 			__pcs_flush_all_cpu(s, cpu);
4101 	}
4102 	mutex_unlock(&slab_mutex);
4103 	return 0;
4104 }
4105 
4106 #ifdef CONFIG_SLUB_DEBUG
4107 static int count_free(struct slab *slab)
4108 {
4109 	return slab->objects - slab->inuse;
4110 }
4111 
4112 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
4113 {
4114 	return atomic_long_read(&n->total_objects);
4115 }
4116 
4117 /* Supports checking bulk free of a constructed freelist */
4118 static inline bool free_debug_processing(struct kmem_cache *s,
4119 	struct slab *slab, void *head, void *tail, int *bulk_cnt,
4120 	unsigned long addr, depot_stack_handle_t handle)
4121 {
4122 	bool checks_ok = false;
4123 	void *object = head;
4124 	int cnt = 0;
4125 
4126 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
4127 		if (!check_slab(s, slab))
4128 			goto out;
4129 	}
4130 
4131 	if (slab->inuse < *bulk_cnt) {
4132 		slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
4133 			 slab->inuse, *bulk_cnt);
4134 		goto out;
4135 	}
4136 
4137 next_object:
4138 
4139 	if (++cnt > *bulk_cnt)
4140 		goto out_cnt;
4141 
4142 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
4143 		if (!free_consistency_checks(s, slab, object, addr))
4144 			goto out;
4145 	}
4146 
4147 	if (s->flags & SLAB_STORE_USER)
4148 		set_track_update(s, object, TRACK_FREE, addr, handle);
4149 	trace(s, slab, object, 0);
4150 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
4151 	init_object(s, object, SLUB_RED_INACTIVE);
4152 
4153 	/* Reached end of constructed freelist yet? */
4154 	if (object != tail) {
4155 		object = get_freepointer(s, object);
4156 		goto next_object;
4157 	}
4158 	checks_ok = true;
4159 
4160 out_cnt:
4161 	if (cnt != *bulk_cnt) {
4162 		slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
4163 			 *bulk_cnt, cnt);
4164 		*bulk_cnt = cnt;
4165 	}
4166 
4167 out:
4168 
4169 	if (!checks_ok)
4170 		slab_fix(s, "Object at 0x%p not freed", object);
4171 
4172 	return checks_ok;
4173 }
4174 #endif /* CONFIG_SLUB_DEBUG */
4175 
4176 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
4177 static unsigned long count_partial(struct kmem_cache_node *n,
4178 					int (*get_count)(struct slab *))
4179 {
4180 	unsigned long flags;
4181 	unsigned long x = 0;
4182 	struct slab *slab;
4183 
4184 	spin_lock_irqsave(&n->list_lock, flags);
4185 	list_for_each_entry(slab, &n->partial, slab_list)
4186 		x += get_count(slab);
4187 	spin_unlock_irqrestore(&n->list_lock, flags);
4188 	return x;
4189 }
4190 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
4191 
4192 #ifdef CONFIG_SLUB_DEBUG
4193 #define MAX_PARTIAL_TO_SCAN 10000
4194 
4195 static unsigned long count_partial_free_approx(struct kmem_cache_node *n)
4196 {
4197 	unsigned long flags;
4198 	unsigned long x = 0;
4199 	struct slab *slab;
4200 
4201 	spin_lock_irqsave(&n->list_lock, flags);
4202 	if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) {
4203 		list_for_each_entry(slab, &n->partial, slab_list)
4204 			x += slab->objects - slab->inuse;
4205 	} else {
4206 		/*
4207 		 * For a long list, approximate the total count of objects in
4208 		 * it to meet the limit on the number of slabs to scan.
4209 		 * Scan from both the list's head and tail for better accuracy.
4210 		 */
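		/*
		 * Example: with nr_partial == 40000 we scan 5000 slabs from
		 * each end; if those 10000 slabs hold 20000 free objects, the
		 * estimate below is 20000 * 40000 / 10000 = 80000, capped at
		 * node_nr_objs(n).
		 */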
4211 		unsigned long scanned = 0;
4212 
4213 		list_for_each_entry(slab, &n->partial, slab_list) {
4214 			x += slab->objects - slab->inuse;
4215 			if (++scanned == MAX_PARTIAL_TO_SCAN / 2)
4216 				break;
4217 		}
4218 		list_for_each_entry_reverse(slab, &n->partial, slab_list) {
4219 			x += slab->objects - slab->inuse;
4220 			if (++scanned == MAX_PARTIAL_TO_SCAN)
4221 				break;
4222 		}
4223 		x = mult_frac(x, n->nr_partial, scanned);
4224 		x = min(x, node_nr_objs(n));
4225 	}
4226 	spin_unlock_irqrestore(&n->list_lock, flags);
4227 	return x;
4228 }
4229 
4230 static noinline void
4231 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
4232 {
4233 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
4234 				      DEFAULT_RATELIMIT_BURST);
4235 	int cpu = raw_smp_processor_id();
4236 	int node;
4237 	struct kmem_cache_node *n;
4238 
4239 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
4240 		return;
4241 
4242 	pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n",
4243 		cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags);
4244 	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
4245 		s->name, s->object_size, s->size, oo_order(s->oo),
4246 		oo_order(s->min));
4247 
4248 	if (oo_order(s->min) > get_order(s->object_size))
4249 		pr_warn("  %s debugging increased min order, use slab_debug=O to disable.\n",
4250 			s->name);
4251 
4252 	for_each_kmem_cache_node(s, node, n) {
4253 		unsigned long nr_slabs;
4254 		unsigned long nr_objs;
4255 		unsigned long nr_free;
4256 
4257 		nr_free  = count_partial_free_approx(n);
4258 		nr_slabs = node_nr_slabs(n);
4259 		nr_objs  = node_nr_objs(n);
4260 
4261 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
4262 			node, nr_slabs, nr_objs, nr_free);
4263 	}
4264 }
4265 #else /* CONFIG_SLUB_DEBUG */
4266 static inline void
4267 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
4268 #endif
4269 
4270 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
4271 {
4272 	if (unlikely(slab_test_pfmemalloc(slab)))
4273 		return gfp_pfmemalloc_allowed(gfpflags);
4274 
4275 	return true;
4276 }
4277 
4278 /*
4279  * Get the slab's freelist and do not freeze it.
4280  *
4281  * Assumes the slab is isolated from node partial list and not frozen.
4282  *
4283  * Assumes this is performed only for caches without debugging so we
4284  * don't need to worry about adding the slab to the full list.
4285  */
4286 static inline void *get_freelist_nofreeze(struct kmem_cache *s, struct slab *slab)
4287 {
4288 	struct freelist_counters old, new;
4289 
4290 	do {
4291 		old.freelist = slab->freelist;
4292 		old.counters = slab->counters;
4293 
4294 		new.freelist = NULL;
4295 		new.counters = old.counters;
4296 		VM_WARN_ON_ONCE(new.frozen);
4297 
4298 		new.inuse = old.objects;
4299 
4300 	} while (!slab_update_freelist(s, slab, &old, &new, "get_freelist_nofreeze"));
4301 
4302 	return old.freelist;
4303 }
4304 
4305 /*
4306  * If the object has been wiped upon free, make sure it's fully initialized by
4307  * zeroing out freelist pointer.
4308  *
4309  * Note that we also wipe custom freelist pointers.
4310  */
4311 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
4312 						   void *obj)
4313 {
4314 	if (unlikely(slab_want_init_on_free(s)) && obj &&
4315 	    !freeptr_outside_object(s))
4316 		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
4317 			0, sizeof(void *));
4318 }
4319 
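/*
 * Take up to @count objects from the freelist of a freshly allocated slab and
 * store them in @p. If objects would remain on the slab, it is added to the
 * head of the node partial list; with !allow_spin this may fail to take the
 * node's list_lock, in which case the new slab is discarded and 0 is returned.
 * Returns the number of objects actually taken.
 */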
4320 static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
4321 		void **p, unsigned int count, bool allow_spin)
4322 {
4323 	unsigned int allocated = 0;
4324 	struct kmem_cache_node *n;
4325 	bool needs_add_partial;
4326 	unsigned long flags;
4327 	void *object;
4328 
4329 	/*
4330 	 * Are we going to put the slab on the partial list?
4331 	 * Note slab->inuse is 0 on a new slab.
4332 	 */
4333 	needs_add_partial = (slab->objects > count);
4334 
4335 	if (!allow_spin && needs_add_partial) {
4336 
4337 		n = get_node(s, slab_nid(slab));
4338 
4339 		if (!spin_trylock_irqsave(&n->list_lock, flags)) {
4340 			/* Unlucky, discard newly allocated slab */
4341 			free_new_slab_nolock(s, slab);
4342 			return 0;
4343 		}
4344 	}
4345 
4346 	object = slab->freelist;
4347 	while (object && allocated < count) {
4348 		p[allocated] = object;
4349 		object = get_freepointer(s, object);
4350 		maybe_wipe_obj_freeptr(s, p[allocated]);
4351 
4352 		slab->inuse++;
4353 		allocated++;
4354 	}
4355 	slab->freelist = object;
4356 
4357 	if (needs_add_partial) {
4358 
4359 		if (allow_spin) {
4360 			n = get_node(s, slab_nid(slab));
4361 			spin_lock_irqsave(&n->list_lock, flags);
4362 		}
4363 		add_partial(n, slab, ADD_TO_HEAD);
4364 		spin_unlock_irqrestore(&n->list_lock, flags);
4365 	}
4366 
4367 	inc_slabs_node(s, slab_nid(slab), slab->objects);
4368 	return allocated;
4369 }
4370 
4371 /*
4372  * Slow path. We failed to allocate via percpu sheaves or they are not available
4373  * Slow path. We failed to allocate via the percpu sheaves, or they are not
4374  * available due to bootstrap, debugging being enabled, or SLUB_TINY.
4375  * We try to allocate from partial slab lists and fall back to allocating a new
4376  * slab.
4377  */
4378 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
4379 			   unsigned long addr, unsigned int orig_size)
4380 {
4381 	bool allow_spin = gfpflags_allow_spinning(gfpflags);
4382 	void *object;
4383 	struct slab *slab;
4384 	struct partial_context pc;
4385 	bool try_thisnode = true;
4386 
4387 	stat(s, ALLOC_SLOWPATH);
4388 
4389 new_objects:
4390 
4391 	pc.flags = gfpflags;
4392 	/*
4393 	 * When a preferred node is indicated but no __GFP_THISNODE
4394 	 *
4395 	 * 1) try to get a partial slab from target node only by having
4396 	 *    __GFP_THISNODE in pc.flags for get_from_partial()
4397 	 * 2) if 1) failed, try to allocate a new slab from target node with
4398 	 *    GFP_NOWAIT | __GFP_THISNODE opportunistically
4399 	 * 3) if 2) failed, retry with original gfpflags which will allow
4400 	 *    get_from_partial() try partial lists of other nodes before
4401 	 *    potentially allocating new page from other nodes
4402 	 */
4403 	if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
4404 		     && try_thisnode)) {
4405 		if (unlikely(!allow_spin))
4406 			/* Do not upgrade gfp to NOWAIT from more restrictive mode */
4407 			pc.flags = gfpflags | __GFP_THISNODE;
4408 		else
4409 			pc.flags = GFP_NOWAIT | __GFP_THISNODE;
4410 	}
4411 
4412 	pc.orig_size = orig_size;
4413 	object = get_from_partial(s, node, &pc);
4414 	if (object)
4415 		goto success;
4416 
4417 	slab = new_slab(s, pc.flags, node);
4418 
4419 	if (unlikely(!slab)) {
4420 		if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
4421 		    && try_thisnode) {
4422 			try_thisnode = false;
4423 			goto new_objects;
4424 		}
4425 		slab_out_of_memory(s, gfpflags, node);
4426 		return NULL;
4427 	}
4428 
4429 	stat(s, ALLOC_SLAB);
4430 
4431 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4432 		object = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
4433 
4434 		if (likely(object))
4435 			goto success;
4436 	} else {
4437 		alloc_from_new_slab(s, slab, &object, 1, allow_spin);
4438 
4439 		/* we don't need to check SLAB_STORE_USER here */
4440 		if (likely(object))
4441 			return object;
4442 	}
4443 
4444 	if (allow_spin)
4445 		goto new_objects;
4446 
4447 	/* Retrying without being able to spin could loop endlessly. Fail instead. */
4448 	return NULL;
4449 
4450 success:
4451 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
4452 		set_track(s, object, TRACK_ALLOC, addr, gfpflags);
4453 
4454 	return object;
4455 }
4456 
4457 static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
4458 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4459 {
4460 	void *object;
4461 
4462 #ifdef CONFIG_NUMA
4463 	if (static_branch_unlikely(&strict_numa) &&
4464 			node == NUMA_NO_NODE) {
4465 
4466 		struct mempolicy *mpol = current->mempolicy;
4467 
4468 		if (mpol) {
4469 			/*
4470 			 * Special BIND rule support. If the local node
4471 			 * is in permitted set then do not redirect
4472 			 * to a particular node.
4473 			 * Otherwise we apply the memory policy to get
4474 			 * the node we need to allocate on.
4475 			 */
4476 			if (mpol->mode != MPOL_BIND ||
4477 					!node_isset(numa_mem_id(), mpol->nodes))
4478 				node = mempolicy_slab_node();
4479 		}
4480 	}
4481 #endif
4482 
4483 	object = ___slab_alloc(s, gfpflags, node, addr, orig_size);
4484 
4485 	return object;
4486 }
4487 
4488 static __fastpath_inline
4489 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
4490 {
4491 	flags &= gfp_allowed_mask;
4492 
4493 	might_alloc(flags);
4494 
4495 	if (unlikely(should_failslab(s, flags)))
4496 		return NULL;
4497 
4498 	return s;
4499 }
4500 
4501 static __fastpath_inline
4502 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
4503 			  gfp_t flags, size_t size, void **p, bool init,
4504 			  unsigned int orig_size)
4505 {
4506 	unsigned int zero_size = s->object_size;
4507 	bool kasan_init = init;
4508 	size_t i;
4509 	gfp_t init_flags = flags & gfp_allowed_mask;
4510 
4511 	/*
4512 	 * For a kmalloc object, the allocated memory size (object_size) is likely
4513 	 * larger than the requested size (orig_size). If redzone check is
4514 	 * enabled for the extra space, don't zero it, as it will be redzoned
4515 	 * soon. The redzone operation for this extra space could be seen as a
4516 	 * replacement of current poisoning under certain debug option, and
4517 	 * won't break other sanity checks.
4518 	 */
4519 	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
4520 	    (s->flags & SLAB_KMALLOC))
4521 		zero_size = orig_size;
4522 
4523 	/*
4524 	 * When slab_debug is enabled, avoid memory initialization integrated
4525 	 * into KASAN and instead zero out the memory via the memset below with
4526 	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
4527 	 * cause false-positive reports. This does not lead to a performance
4528 	 * penalty on production builds, as slab_debug is not intended to be
4529 	 * enabled there.
4530 	 */
4531 	if (__slub_debug_enabled())
4532 		kasan_init = false;
4533 
4534 	/*
4535 	 * As memory initialization might be integrated into KASAN,
4536 	 * kasan_slab_alloc and initialization memset must be
4537 	 * kept together to avoid discrepancies in behavior.
4538 	 *
4539 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
4540 	 */
4541 	for (i = 0; i < size; i++) {
4542 		p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
4543 		if (p[i] && init && (!kasan_init ||
4544 				     !kasan_has_integrated_init()))
4545 			memset(p[i], 0, zero_size);
4546 		if (gfpflags_allow_spinning(flags))
4547 			kmemleak_alloc_recursive(p[i], s->object_size, 1,
4548 						 s->flags, init_flags);
4549 		kmsan_slab_alloc(s, p[i], init_flags);
4550 		alloc_tagging_slab_alloc_hook(s, p[i], flags);
4551 	}
4552 
4553 	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
4554 }
4555 
4556 /*
4557  * Replace the empty main sheaf with a (at least partially) full sheaf.
4558  *
4559  * Must be called with the cpu_sheaves local lock locked. If successful, returns
4560  * the pcs pointer and the local lock locked (possibly on a different cpu than
4561  * initially called). If not successful, returns NULL and the local lock
4562  * unlocked.
4563  */
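/*
 * Sketch of the fallback order implemented below: use a non-empty spare sheaf
 * if there is one, then try to swap the empty main sheaf for a full one from
 * the barn, and only as a last resort drop the local lock to refill an empty
 * sheaf (the spare or one from the barn) or allocate a brand new full sheaf
 * with the given gfp flags.
 */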
4564 static struct slub_percpu_sheaves *
4565 __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, gfp_t gfp)
4566 {
4567 	struct slab_sheaf *empty = NULL;
4568 	struct slab_sheaf *full;
4569 	struct node_barn *barn;
4570 	bool allow_spin;
4571 
4572 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
4573 
4574 	/* Bootstrap or debug cache, back off */
4575 	if (unlikely(!cache_has_sheaves(s))) {
4576 		local_unlock(&s->cpu_sheaves->lock);
4577 		return NULL;
4578 	}
4579 
4580 	if (pcs->spare && pcs->spare->size > 0) {
4581 		swap(pcs->main, pcs->spare);
4582 		return pcs;
4583 	}
4584 
4585 	barn = get_barn(s);
4586 	if (!barn) {
4587 		local_unlock(&s->cpu_sheaves->lock);
4588 		return NULL;
4589 	}
4590 
4591 	allow_spin = gfpflags_allow_spinning(gfp);
4592 
4593 	full = barn_replace_empty_sheaf(barn, pcs->main, allow_spin);
4594 
4595 	if (full) {
4596 		stat(s, BARN_GET);
4597 		pcs->main = full;
4598 		return pcs;
4599 	}
4600 
4601 	stat(s, BARN_GET_FAIL);
4602 
4603 	if (allow_spin) {
4604 		if (pcs->spare) {
4605 			empty = pcs->spare;
4606 			pcs->spare = NULL;
4607 		} else {
4608 			empty = barn_get_empty_sheaf(barn, true);
4609 		}
4610 	}
4611 
4612 	local_unlock(&s->cpu_sheaves->lock);
4613 	pcs = NULL;
4614 
4615 	if (!allow_spin)
4616 		return NULL;
4617 
4618 	if (empty) {
4619 		if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
4620 			full = empty;
4621 		} else {
4622 			/*
4623 			 * we must be very low on memory so don't bother
4624 			 * with the barn
4625 			 */
4626 			free_empty_sheaf(s, empty);
4627 		}
4628 	} else {
4629 		full = alloc_full_sheaf(s, gfp);
4630 	}
4631 
4632 	if (!full)
4633 		return NULL;
4634 
4635 	if (!local_trylock(&s->cpu_sheaves->lock))
4636 		goto barn_put;
4637 	pcs = this_cpu_ptr(s->cpu_sheaves);
4638 
4639 	/*
4640 	 * If we are returning empty sheaf, we either got it from the
4641 	 * barn or had to allocate one. If we are returning a full
4642 	 * sheaf, it's due to racing or being migrated to a different
4643 	 * cpu. Breaching the barn's sheaf limits should be thus rare
4644 	 * enough so just ignore them to simplify the recovery.
4645 	 */
4646 
4647 	if (pcs->main->size == 0) {
4648 		if (!pcs->spare)
4649 			pcs->spare = pcs->main;
4650 		else
4651 			barn_put_empty_sheaf(barn, pcs->main);
4652 		pcs->main = full;
4653 		return pcs;
4654 	}
4655 
4656 	if (!pcs->spare) {
4657 		pcs->spare = full;
4658 		return pcs;
4659 	}
4660 
4661 	if (pcs->spare->size == 0) {
4662 		barn_put_empty_sheaf(barn, pcs->spare);
4663 		pcs->spare = full;
4664 		return pcs;
4665 	}
4666 
4667 barn_put:
4668 	barn_put_full_sheaf(barn, full);
4669 	stat(s, BARN_PUT);
4670 
4671 	return pcs;
4672 }
4673 
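/*
 * Fast path allocation of a single object from the percpu sheaves. Returns
 * NULL when the local lock cannot be taken, the main sheaf cannot be made
 * non-empty, or (for a node-specific request) the top object is not from the
 * requested node; the caller then falls back to __slab_alloc_node().
 */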
4674 static __fastpath_inline
4675 void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node)
4676 {
4677 	struct slub_percpu_sheaves *pcs;
4678 	bool node_requested;
4679 	void *object;
4680 
4681 #ifdef CONFIG_NUMA
4682 	if (static_branch_unlikely(&strict_numa) &&
4683 			 node == NUMA_NO_NODE) {
4684 
4685 		struct mempolicy *mpol = current->mempolicy;
4686 
4687 		if (mpol) {
4688 			/*
4689 			 * Special BIND rule support. If the local node
4690 			 * is in permitted set then do not redirect
4691 			 * to a particular node.
4692 			 * Otherwise we apply the memory policy to get
4693 			 * the node we need to allocate on.
4694 			 */
4695 			if (mpol->mode != MPOL_BIND ||
4696 					!node_isset(numa_mem_id(), mpol->nodes))
4697 
4698 				node = mempolicy_slab_node();
4699 		}
4700 	}
4701 #endif
4702 
4703 	node_requested = IS_ENABLED(CONFIG_NUMA) && node != NUMA_NO_NODE;
4704 
4705 	/*
4706 	 * We assume the percpu sheaves contain only local objects although it's
4707 	 * not completely guaranteed, so we verify later.
4708 	 */
4709 	if (unlikely(node_requested && node != numa_mem_id())) {
4710 		stat(s, ALLOC_NODE_MISMATCH);
4711 		return NULL;
4712 	}
4713 
4714 	if (!local_trylock(&s->cpu_sheaves->lock))
4715 		return NULL;
4716 
4717 	pcs = this_cpu_ptr(s->cpu_sheaves);
4718 
4719 	if (unlikely(pcs->main->size == 0)) {
4720 		pcs = __pcs_replace_empty_main(s, pcs, gfp);
4721 		if (unlikely(!pcs))
4722 			return NULL;
4723 	}
4724 
4725 	object = pcs->main->objects[pcs->main->size - 1];
4726 
4727 	if (unlikely(node_requested)) {
4728 		/*
4729 		 * Verify that the object was from the node we want. This could
4730 		 * be false because of cpu migration during an unlocked part of
4731 		 * the current allocation or previous freeing process.
4732 		 */
4733 		if (page_to_nid(virt_to_page(object)) != node) {
4734 			local_unlock(&s->cpu_sheaves->lock);
4735 			stat(s, ALLOC_NODE_MISMATCH);
4736 			return NULL;
4737 		}
4738 	}
4739 
4740 	pcs->main->size--;
4741 
4742 	local_unlock(&s->cpu_sheaves->lock);
4743 
4744 	stat(s, ALLOC_FASTPATH);
4745 
4746 	return object;
4747 }
4748 
4749 static __fastpath_inline
4750 unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, gfp_t gfp, size_t size,
4751 				 void **p)
4752 {
4753 	struct slub_percpu_sheaves *pcs;
4754 	struct slab_sheaf *main;
4755 	unsigned int allocated = 0;
4756 	unsigned int batch;
4757 
4758 next_batch:
4759 	if (!local_trylock(&s->cpu_sheaves->lock))
4760 		return allocated;
4761 
4762 	pcs = this_cpu_ptr(s->cpu_sheaves);
4763 
4764 	if (unlikely(pcs->main->size == 0)) {
4765 
4766 		struct slab_sheaf *full;
4767 		struct node_barn *barn;
4768 
4769 		if (unlikely(!cache_has_sheaves(s))) {
4770 			local_unlock(&s->cpu_sheaves->lock);
4771 			return allocated;
4772 		}
4773 
4774 		if (pcs->spare && pcs->spare->size > 0) {
4775 			swap(pcs->main, pcs->spare);
4776 			goto do_alloc;
4777 		}
4778 
4779 		barn = get_barn(s);
4780 		if (!barn) {
4781 			local_unlock(&s->cpu_sheaves->lock);
4782 			return allocated;
4783 		}
4784 
4785 		full = barn_replace_empty_sheaf(barn, pcs->main,
4786 						gfpflags_allow_spinning(gfp));
4787 
4788 		if (full) {
4789 			stat(s, BARN_GET);
4790 			pcs->main = full;
4791 			goto do_alloc;
4792 		}
4793 
4794 		stat(s, BARN_GET_FAIL);
4795 
4796 		local_unlock(&s->cpu_sheaves->lock);
4797 
4798 		/*
4799 		 * Once full sheaves in barn are depleted, let the bulk
4800 		 * allocation continue from slab pages, otherwise we would just
4801 		 * be copying arrays of pointers twice.
4802 		 */
4803 		return allocated;
4804 	}
4805 
4806 do_alloc:
4807 
4808 	main = pcs->main;
4809 	batch = min(size, main->size);
4810 
4811 	main->size -= batch;
4812 	memcpy(p, main->objects + main->size, batch * sizeof(void *));
4813 
4814 	local_unlock(&s->cpu_sheaves->lock);
4815 
4816 	stat_add(s, ALLOC_FASTPATH, batch);
4817 
4818 	allocated += batch;
4819 
4820 	if (batch < size) {
4821 		p += batch;
4822 		size -= batch;
4823 		goto next_batch;
4824 	}
4825 
4826 	return allocated;
4827 }
4828 
4829 
4830 /*
4831  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
4832  * have the fastpath folded into their functions. So no function call
4833  * overhead for requests that can be satisfied on the fastpath.
4834  *
4835  * The fastpath works by first trying to take an object from the percpu
4836  * sheaves via alloc_from_pcs(). If that fails, __slab_alloc_node() is called
4837  * for slow processing.
4839  */
4840 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
4841 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4842 {
4843 	void *object;
4844 	bool init = false;
4845 
4846 	s = slab_pre_alloc_hook(s, gfpflags);
4847 	if (unlikely(!s))
4848 		return NULL;
4849 
4850 	object = kfence_alloc(s, orig_size, gfpflags);
4851 	if (unlikely(object))
4852 		goto out;
4853 
4854 	object = alloc_from_pcs(s, gfpflags, node);
4855 
4856 	if (!object)
4857 		object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
4858 
4859 	maybe_wipe_obj_freeptr(s, object);
4860 	init = slab_want_init_on_alloc(gfpflags, s);
4861 
4862 out:
4863 	/*
4864 	 * When init equals 'true', like for kzalloc() family, only
4865 	 * @orig_size bytes might be zeroed instead of s->object_size.
4866 	 * In case this fails due to memcg_slab_post_alloc_hook(),
4867 	 * object is set to NULL
4868 	 */
4869 	slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size);
4870 
4871 	return object;
4872 }
4873 
4874 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
4875 {
4876 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
4877 				    s->object_size);
4878 
4879 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4880 
4881 	return ret;
4882 }
4883 EXPORT_SYMBOL(kmem_cache_alloc_noprof);
4884 
4885 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
4886 			   gfp_t gfpflags)
4887 {
4888 	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
4889 				    s->object_size);
4890 
4891 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4892 
4893 	return ret;
4894 }
4895 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
4896 
4897 bool kmem_cache_charge(void *objp, gfp_t gfpflags)
4898 {
4899 	if (!memcg_kmem_online())
4900 		return true;
4901 
4902 	return memcg_slab_post_charge(objp, gfpflags);
4903 }
4904 EXPORT_SYMBOL(kmem_cache_charge);
4905 
4906 /**
4907  * kmem_cache_alloc_node - Allocate an object on the specified node
4908  * @s: The cache to allocate from.
4909  * @gfpflags: See kmalloc().
4910  * @node: node number of the target node.
4911  *
4912  * Identical to kmem_cache_alloc but it will allocate memory on the given
4913  * node, which can improve the performance for cpu bound structures.
4914  *
4915  * Fallback to other node is possible if __GFP_THISNODE is not set.
4916  *
4917  * Return: pointer to the new object or %NULL in case of error
4918  */
4919 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
4920 {
4921 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
4922 
4923 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
4924 
4925 	return ret;
4926 }
4927 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
4928 
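/*
 * Refill a sheaf while trying to stay out of the pfmemalloc reserves: first
 * attempt the refill with __GFP_NOMEMALLOC, and only if that fails and the
 * context is entitled to the reserves (gfp_pfmemalloc_allowed()) retry with
 * the original flags, marking the sheaf as pfmemalloc so that it is flushed
 * and freed rather than reused when returned.
 */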
4929 static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
4930 				      struct slab_sheaf *sheaf, gfp_t gfp)
4931 {
4932 	gfp_t gfp_nomemalloc;
4933 	int ret;
4934 
4935 	gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
4936 	if (gfp_pfmemalloc_allowed(gfp))
4937 		gfp_nomemalloc |= __GFP_NOWARN;
4938 
4939 	ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
4940 
4941 	if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
4942 		return ret;
4943 
4944 	/*
4945 	 * if we are allowed to, refill sheaf with pfmemalloc but then remember
4946 	 * it for when it's returned
4947 	 */
4948 	ret = refill_sheaf(s, sheaf, gfp);
4949 	sheaf->pfmemalloc = true;
4950 
4951 	return ret;
4952 }
4953 
4954 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
4955 				   size_t size, void **p);
4956 
4957 /*
4958  * returns a sheaf that has at least the requested size
4959  * returns a sheaf that has at least the requested number of objects; when
4960  * prefilling is needed, it is done with the given gfp flags
4961  * return NULL if sheaf allocation or prefilling failed
4962  */
4963 struct slab_sheaf *
4964 kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
4965 {
4966 	struct slub_percpu_sheaves *pcs;
4967 	struct slab_sheaf *sheaf = NULL;
4968 	struct node_barn *barn;
4969 
4970 	if (unlikely(!size))
4971 		return NULL;
4972 
4973 	if (unlikely(size > s->sheaf_capacity)) {
4974 
4975 		sheaf = kzalloc_flex(*sheaf, objects, size, gfp);
4976 		if (!sheaf)
4977 			return NULL;
4978 
4979 		stat(s, SHEAF_PREFILL_OVERSIZE);
4980 		sheaf->cache = s;
4981 		sheaf->capacity = size;
4982 
4983 		/*
4984 		 * we do not need to care about pfmemalloc here because oversize
4985 		 * sheaves are always flushed and freed when returned
4986 		 */
4987 		if (!__kmem_cache_alloc_bulk(s, gfp, size,
4988 					     &sheaf->objects[0])) {
4989 			kfree(sheaf);
4990 			return NULL;
4991 		}
4992 
4993 		sheaf->size = size;
4994 
4995 		return sheaf;
4996 	}
4997 
4998 	local_lock(&s->cpu_sheaves->lock);
4999 	pcs = this_cpu_ptr(s->cpu_sheaves);
5000 
5001 	if (pcs->spare) {
5002 		sheaf = pcs->spare;
5003 		pcs->spare = NULL;
5004 		stat(s, SHEAF_PREFILL_FAST);
5005 	} else {
5006 		barn = get_barn(s);
5007 
5008 		stat(s, SHEAF_PREFILL_SLOW);
5009 		if (barn)
5010 			sheaf = barn_get_full_or_empty_sheaf(barn);
5011 		if (sheaf && sheaf->size)
5012 			stat(s, BARN_GET);
5013 		else
5014 			stat(s, BARN_GET_FAIL);
5015 	}
5016 
5017 	local_unlock(&s->cpu_sheaves->lock);
5018 
5019 
5020 	if (!sheaf)
5021 		sheaf = alloc_empty_sheaf(s, gfp);
5022 
5023 	if (sheaf) {
5024 		sheaf->capacity = s->sheaf_capacity;
5025 		sheaf->pfmemalloc = false;
5026 
5027 		if (sheaf->size < size &&
5028 		    __prefill_sheaf_pfmemalloc(s, sheaf, gfp)) {
5029 			sheaf_flush_unused(s, sheaf);
5030 			free_empty_sheaf(s, sheaf);
5031 			sheaf = NULL;
5032 		}
5033 	}
5034 
5035 	return sheaf;
5036 }
5037 
5038 /*
5039  * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
5040  *
5041  * If the sheaf cannot simply become the percpu spare sheaf, but there's space
5042  * for a full sheaf in the barn, we try to refill the sheaf back to the cache's
5043  * sheaf_capacity to avoid handling partially full sheaves.
5044  *
5045  * If the refill fails because gfp is e.g. GFP_NOWAIT, or the barn is full, the
5046  * sheaf is instead flushed and freed.
5047  */
5048 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
5049 			     struct slab_sheaf *sheaf)
5050 {
5051 	struct slub_percpu_sheaves *pcs;
5052 	struct node_barn *barn;
5053 
5054 	if (unlikely((sheaf->capacity != s->sheaf_capacity)
5055 		     || sheaf->pfmemalloc)) {
5056 		sheaf_flush_unused(s, sheaf);
5057 		kfree(sheaf);
5058 		return;
5059 	}
5060 
5061 	local_lock(&s->cpu_sheaves->lock);
5062 	pcs = this_cpu_ptr(s->cpu_sheaves);
5063 	barn = get_barn(s);
5064 
5065 	if (!pcs->spare) {
5066 		pcs->spare = sheaf;
5067 		sheaf = NULL;
5068 		stat(s, SHEAF_RETURN_FAST);
5069 	}
5070 
5071 	local_unlock(&s->cpu_sheaves->lock);
5072 
5073 	if (!sheaf)
5074 		return;
5075 
5076 	stat(s, SHEAF_RETURN_SLOW);
5077 
5078 	/*
5079 	 * If the barn has too many full sheaves or we fail to refill the sheaf,
5080 	 * simply flush and free it.
5081 	 */
5082 	if (!barn || data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
5083 	    refill_sheaf(s, sheaf, gfp)) {
5084 		sheaf_flush_unused(s, sheaf);
5085 		free_empty_sheaf(s, sheaf);
5086 		return;
5087 	}
5088 
5089 	barn_put_full_sheaf(barn, sheaf);
5090 	stat(s, BARN_PUT);
5091 }
5092 
5093 /*
5094  * refill a sheaf previously returned by kmem_cache_prefill_sheaf to at least
5095  * the given size
5096  *
5097  * the sheaf might be replaced by a new one when requesting more than
5098  * s->sheaf_capacity objects. If such a replacement is necessary but the refill
5099  * fails (returning -ENOMEM), the existing sheaf is left intact
5100  *
5101  * In practice we always refill to full sheaf's capacity.
5102  */
5103 int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
5104 			    struct slab_sheaf **sheafp, unsigned int size)
5105 {
5106 	struct slab_sheaf *sheaf;
5107 
5108 	/*
5109 	 * TODO: do we want to support *sheaf == NULL to be equivalent of
5110 	 * kmem_cache_prefill_sheaf() ?
5111 	 */
5112 	if (!sheafp || !(*sheafp))
5113 		return -EINVAL;
5114 
5115 	sheaf = *sheafp;
5116 	if (sheaf->size >= size)
5117 		return 0;
5118 
5119 	if (likely(sheaf->capacity >= size)) {
5120 		if (likely(sheaf->capacity == s->sheaf_capacity))
5121 			return __prefill_sheaf_pfmemalloc(s, sheaf, gfp);
5122 
5123 		if (!__kmem_cache_alloc_bulk(s, gfp, sheaf->capacity - sheaf->size,
5124 					     &sheaf->objects[sheaf->size])) {
5125 			return -ENOMEM;
5126 		}
5127 		sheaf->size = sheaf->capacity;
5128 
5129 		return 0;
5130 	}
5131 
5132 	/*
5133 	 * We had a regular sized sheaf and need an oversize one, or we had an
5134 	 * oversize one already but need a larger one now.
5135 	 * This should be a very rare path so let's not complicate it.
5136 	 */
5137 	sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
5138 	if (!sheaf)
5139 		return -ENOMEM;
5140 
5141 	kmem_cache_return_sheaf(s, gfp, *sheafp);
5142 	*sheafp = sheaf;
5143 	return 0;
5144 }
5145 
5146 /*
5147  * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
5148  *
5149  * Guaranteed not to fail for as many allocations as the requested prefill size.
5150  * After the sheaf is emptied, it fails - no fallback to the slab cache itself.
5151  *
5152  * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT;
5153  * memcg charging is forced over the limit if necessary, to avoid failure.
5154  *
5155  * It is possible that the allocation comes from kfence and then the sheaf
5156  * size is not decreased.
5157  */
5158 void *
5159 kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
5160 				   struct slab_sheaf *sheaf)
5161 {
5162 	void *ret = NULL;
5163 	bool init;
5164 
5165 	if (sheaf->size == 0)
5166 		goto out;
5167 
5168 	ret = kfence_alloc(s, s->object_size, gfp);
5169 
5170 	if (likely(!ret))
5171 		ret = sheaf->objects[--sheaf->size];
5172 
5173 	init = slab_want_init_on_alloc(gfp, s);
5174 
5175 	/* add __GFP_NOFAIL to force successful memcg charging */
5176 	slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);
5177 out:
5178 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
5179 
5180 	return ret;
5181 }
5182 
5183 unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
5184 {
5185 	return sheaf->size;
5186 }
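
/*
 * Illustrative usage sketch (not part of this file): a caller that needs a
 * number of guaranteed allocations, e.g. under locks where failure handling
 * is impractical, can prefill a sheaf up front, allocate from it, top it up
 * with a refill if needed, and finally hand it back. Here
 * kmem_cache_alloc_from_sheaf() stands for the allocation-profiling wrapper
 * of kmem_cache_alloc_from_sheaf_noprof() above; "cache" and "obj" are
 * made-up names:
 *
 *	struct slab_sheaf *sheaf;
 *	void *obj;
 *
 *	sheaf = kmem_cache_prefill_sheaf(cache, GFP_KERNEL, 8);
 *	if (!sheaf)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc_from_sheaf(cache, GFP_KERNEL, sheaf);
 *
 *	if (kmem_cache_refill_sheaf(cache, GFP_KERNEL, &sheaf, 8))
 *		return -ENOMEM;
 *
 *	kmem_cache_return_sheaf(cache, GFP_KERNEL, sheaf);
 */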
5187 /*
5188  * To avoid unnecessary overhead, we pass through large allocation requests
5189  * directly to the page allocator. We use __GFP_COMP, because we will need to
5190  * know the allocation order to free the pages properly in kfree.
5191  */
5192 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
5193 {
5194 	struct page *page;
5195 	void *ptr = NULL;
5196 	unsigned int order = get_order(size);
5197 
5198 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
5199 		flags = kmalloc_fix_flags(flags);
5200 
5201 	flags |= __GFP_COMP;
5202 
5203 	if (node == NUMA_NO_NODE)
5204 		page = alloc_frozen_pages_noprof(flags, order);
5205 	else
5206 		page = __alloc_frozen_pages_noprof(flags, order, node, NULL);
5207 
5208 	if (page) {
5209 		ptr = page_address(page);
5210 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
5211 				      PAGE_SIZE << order);
5212 		__SetPageLargeKmalloc(page);
5213 	}
5214 
5215 	ptr = kasan_kmalloc_large(ptr, size, flags);
5216 	/* As ptr might get tagged, call kmemleak hook after KASAN. */
5217 	kmemleak_alloc(ptr, size, 1, flags);
5218 	kmsan_kmalloc_large(ptr, size, flags);
5219 
5220 	return ptr;
5221 }
5222 
5223 void *__kmalloc_large_noprof(size_t size, gfp_t flags)
5224 {
5225 	void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
5226 
5227 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
5228 		      flags, NUMA_NO_NODE);
5229 	return ret;
5230 }
5231 EXPORT_SYMBOL(__kmalloc_large_noprof);
5232 
5233 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
5234 {
5235 	void *ret = ___kmalloc_large_node(size, flags, node);
5236 
5237 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
5238 		      flags, node);
5239 	return ret;
5240 }
5241 EXPORT_SYMBOL(__kmalloc_large_node_noprof);
5242 
5243 static __always_inline
5244 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
5245 			unsigned long caller)
5246 {
5247 	struct kmem_cache *s;
5248 	void *ret;
5249 
5250 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
5251 		ret = __kmalloc_large_node_noprof(size, flags, node);
5252 		trace_kmalloc(caller, ret, size,
5253 			      PAGE_SIZE << get_order(size), flags, node);
5254 		return ret;
5255 	}
5256 
5257 	if (unlikely(!size))
5258 		return ZERO_SIZE_PTR;
5259 
5260 	s = kmalloc_slab(size, b, flags, caller);
5261 
5262 	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
5263 	ret = kasan_kmalloc(s, ret, size, flags);
5264 	trace_kmalloc(caller, ret, size, s->size, flags, node);
5265 	return ret;
5266 }
5267 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
5268 {
5269 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
5270 }
5271 EXPORT_SYMBOL(__kmalloc_node_noprof);
5272 
5273 void *__kmalloc_noprof(size_t size, gfp_t flags)
5274 {
5275 	return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
5276 }
5277 EXPORT_SYMBOL(__kmalloc_noprof);
5278 
5279 /**
5280  * kmalloc_nolock - Allocate an object of given size from any context.
5281  * @size: size to allocate
5282  * @gfp_flags: GFP flags. Only __GFP_ACCOUNT, __GFP_ZERO, __GFP_NO_OBJ_EXT
5283  * allowed.
5284  * @node: node number of the target node.
5285  *
5286  * Return: pointer to the new object or NULL in case of error.
5287  * NULL does not mean EBUSY or EAGAIN. It means ENOMEM.
5288  * There is no reason to call it again and expect !NULL.
5289  */
5290 void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
5291 {
5292 	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_NOMEMALLOC | gfp_flags;
5293 	struct kmem_cache *s;
5294 	bool can_retry = true;
5295 	void *ret;
5296 
5297 	VM_WARN_ON_ONCE(gfp_flags & ~(__GFP_ACCOUNT | __GFP_ZERO |
5298 				      __GFP_NO_OBJ_EXT));
5299 
5300 	if (unlikely(!size))
5301 		return ZERO_SIZE_PTR;
5302 
5303 	/*
5304 	 * See the comment for the same check in
5305 	 * alloc_frozen_pages_nolock_noprof()
5306 	 */
5307 	if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
5308 		return NULL;
5309 
5310 retry:
5311 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
5312 		return NULL;
5313 	s = kmalloc_slab(size, NULL, alloc_gfp, _RET_IP_);
5314 
5315 	if (!(s->flags & __CMPXCHG_DOUBLE) && !kmem_cache_debug(s))
5316 		/*
5317 		 * kmalloc_nolock() is not supported on architectures that
5318 		 * don't implement cmpxchg16b and thus need slab_lock()
5319 		 * which could be preempted by an NMI.
5320 		 * But debug caches don't use that and only rely on
5321 		 * kmem_cache_node->list_lock, so kmalloc_nolock() can attempt
5322 		 * to allocate from debug caches by
5323 		 * spin_trylock_irqsave(&n->list_lock, ...)
5324 		 */
5325 		return NULL;
5326 
5327 	ret = alloc_from_pcs(s, alloc_gfp, node);
5328 	if (ret)
5329 		goto success;
5330 
5331 	/*
5332 	 * Do not call slab_alloc_node(), since trylock mode isn't
5333 	 * compatible with slab_pre_alloc_hook/should_failslab and
5334 	 * kfence_alloc. Hence call __slab_alloc_node() (at most twice)
5335 	 * and slab_post_alloc_hook() directly.
5336 	 */
5337 	ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size);
5338 
5339 	/*
5340 	 * It's possible we failed due to trylock as we preempted someone with
5341 	 * the sheaves locked, and the list_lock is also held by another cpu.
5342 	 * But it should be rare that multiple kmalloc buckets would have
5343 	 * sheaves locked, so try a larger one.
5344 	 */
5345 	if (!ret && can_retry) {
5346 		/* pick the next kmalloc bucket */
5347 		size = s->object_size + 1;
5348 		/*
5349 		 * Another alternative is to
5350 		 * if (memcg) alloc_gfp &= ~__GFP_ACCOUNT;
5351 		 * else if (!memcg) alloc_gfp |= __GFP_ACCOUNT;
5352 		 * to retry from bucket of the same size.
5353 		 */
5354 		can_retry = false;
5355 		goto retry;
5356 	}
5357 
5358 success:
5359 	maybe_wipe_obj_freeptr(s, ret);
5360 	slab_post_alloc_hook(s, NULL, alloc_gfp, 1, &ret,
5361 			     slab_want_init_on_alloc(alloc_gfp, s), size);
5362 
5363 	ret = kasan_kmalloc(s, ret, size, alloc_gfp);
5364 	return ret;
5365 }
5366 EXPORT_SYMBOL_GPL(kmalloc_nolock_noprof);
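
/*
 * Illustrative sketch (not part of this file): kmalloc_nolock(), the
 * allocation-profiling wrapper of the function above, can be paired with
 * kfree_nolock() from contexts where regular kmalloc()/kfree() would be
 * unsafe, such as NMI or hard IRQ handlers or while holding a
 * raw_spinlock_t. Only the documented gfp flags may be passed; "struct foo"
 * and "f" are made-up names:
 *
 *	struct foo *f;
 *
 *	f = kmalloc_nolock(sizeof(*f), __GFP_ZERO, NUMA_NO_NODE);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree_nolock(f);
 */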
5367 
5368 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
5369 					 int node, unsigned long caller)
5370 {
5371 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
5372 
5373 }
5374 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
5375 
5376 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
5377 {
5378 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
5379 					    _RET_IP_, size);
5380 
5381 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
5382 
5383 	ret = kasan_kmalloc(s, ret, size, gfpflags);
5384 	return ret;
5385 }
5386 EXPORT_SYMBOL(__kmalloc_cache_noprof);
5387 
5388 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
5389 				  int node, size_t size)
5390 {
5391 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
5392 
5393 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
5394 
5395 	ret = kasan_kmalloc(s, ret, size, gfpflags);
5396 	return ret;
5397 }
5398 EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
5399 
5400 static noinline void free_to_partial_list(
5401 	struct kmem_cache *s, struct slab *slab,
5402 	void *head, void *tail, int bulk_cnt,
5403 	unsigned long addr)
5404 {
5405 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
5406 	struct slab *slab_free = NULL;
5407 	int cnt = bulk_cnt;
5408 	unsigned long flags;
5409 	depot_stack_handle_t handle = 0;
5410 
5411 	/*
5412 	 * We cannot use GFP_NOWAIT as there are callsites where waking up
5413 	 * kswapd could deadlock
5414 	 */
5415 	if (s->flags & SLAB_STORE_USER)
5416 		handle = set_track_prepare(__GFP_NOWARN);
5417 
5418 	spin_lock_irqsave(&n->list_lock, flags);
5419 
5420 	if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
5421 		void *prior = slab->freelist;
5422 
5423 		/* Perform the actual freeing while we still hold the locks */
5424 		slab->inuse -= cnt;
5425 		set_freepointer(s, tail, prior);
5426 		slab->freelist = head;
5427 
5428 		/*
5429 		 * If the slab is empty and the node's partial list is full,
5430 		 * it should be discarded regardless of whether it is on the
5431 		 * full or partial list.
5432 		 */
5433 		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
5434 			slab_free = slab;
5435 
5436 		if (!prior) {
5437 			/* was on full list */
5438 			remove_full(s, n, slab);
5439 			if (!slab_free) {
5440 				add_partial(n, slab, ADD_TO_TAIL);
5441 				stat(s, FREE_ADD_PARTIAL);
5442 			}
5443 		} else if (slab_free) {
5444 			remove_partial(n, slab);
5445 			stat(s, FREE_REMOVE_PARTIAL);
5446 		}
5447 	}
5448 
5449 	if (slab_free) {
5450 		/*
5451 		 * Update the counters while still holding n->list_lock to
5452 		 * prevent spurious validation warnings
5453 		 */
5454 		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
5455 	}
5456 
5457 	spin_unlock_irqrestore(&n->list_lock, flags);
5458 
5459 	if (slab_free) {
5460 		stat(s, FREE_SLAB);
5461 		free_slab(s, slab_free);
5462 	}
5463 }
5464 
5465 /*
5466  * Slow path handling. This may still be called frequently since objects
5467  * have a longer lifetime than the cpu slabs in most processing loads.
5468  *
5469  * So we still attempt to reduce cache line usage. Just take the slab
5470  * lock and free the item. If there is no additional partial slab
5471  * handling required then we can return immediately.
5472  */
5473 static void __slab_free(struct kmem_cache *s, struct slab *slab,
5474 			void *head, void *tail, int cnt,
5475 			unsigned long addr)
5476 
5477 {
5478 	bool was_full;
5479 	struct freelist_counters old, new;
5480 	struct kmem_cache_node *n = NULL;
5481 	unsigned long flags;
5482 	bool on_node_partial;
5483 
5484 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
5485 		free_to_partial_list(s, slab, head, tail, cnt, addr);
5486 		return;
5487 	}
5488 
5489 	do {
5490 		if (unlikely(n)) {
5491 			spin_unlock_irqrestore(&n->list_lock, flags);
5492 			n = NULL;
5493 		}
5494 
5495 		old.freelist = slab->freelist;
5496 		old.counters = slab->counters;
5497 
5498 		was_full = (old.freelist == NULL);
5499 
5500 		set_freepointer(s, tail, old.freelist);
5501 
5502 		new.freelist = head;
5503 		new.counters = old.counters;
5504 		new.inuse -= cnt;
5505 
5506 		/*
5507 		 * The slab might need to be taken off the partial list (due to
5508 		 * becoming empty) or added to it (due to no longer being full).
5509 		 * Unless it's frozen.
5510 		 */
5511 		if (!new.inuse || was_full) {
5512 
5513 			n = get_node(s, slab_nid(slab));
5514 			/*
5515 			 * Speculatively acquire the list_lock.
5516 			 * If the cmpxchg does not succeed then we may
5517 			 * drop the list_lock without any processing.
5518 			 *
5519 			 * Otherwise the list_lock will synchronize with
5520 			 * other processors updating the list of slabs.
5521 			 */
5522 			spin_lock_irqsave(&n->list_lock, flags);
5523 
5524 			on_node_partial = slab_test_node_partial(slab);
5525 		}
5526 
5527 	} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
5528 
5529 	if (likely(!n)) {
5530 		/*
5531 		 * We didn't take the list_lock because the slab was already on
5532 		 * the partial list and will remain there.
5533 		 */
5534 		return;
5535 	}
5536 
5537 	/*
5538 	 * This slab was partially empty but not on the per-node partial list,
5539 	 * in which case we shouldn't manipulate its list, just return.
5540 	 */
5541 	if (!was_full && !on_node_partial) {
5542 		spin_unlock_irqrestore(&n->list_lock, flags);
5543 		return;
5544 	}
5545 
5546 	/*
5547 	 * If the slab became empty, should we add/keep it on the partial list,
5548 	 * or do we already have enough partial slabs?
5549 	 */
5550 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
5551 		goto slab_empty;
5552 
5553 	/*
5554 	 * Objects left in the slab. If it was not on the partial list before
5555 	 * then add it.
5556 	 */
5557 	if (unlikely(was_full)) {
5558 		add_partial(n, slab, ADD_TO_TAIL);
5559 		stat(s, FREE_ADD_PARTIAL);
5560 	}
5561 	spin_unlock_irqrestore(&n->list_lock, flags);
5562 	return;
5563 
5564 slab_empty:
5565 	/*
5566 	 * The slab could have a single object and thus go from full to empty in
5567 	 * a single free, but more likely it was on the partial list. Remove it.
5568 	 */
5569 	if (likely(!was_full)) {
5570 		remove_partial(n, slab);
5571 		stat(s, FREE_REMOVE_PARTIAL);
5572 	}
5573 
5574 	spin_unlock_irqrestore(&n->list_lock, flags);
5575 	stat(s, FREE_SLAB);
5576 	discard_slab(s, slab);
5577 }
5578 
5579 /*
5580  * pcs is locked. We should have gotten rid of the spare sheaf and obtained an
5581  * empty sheaf, while the main sheaf is full. We want to install the empty sheaf
5582  * as the main sheaf, and make the current main sheaf the spare sheaf.
5583  *
5584  * However due to having relinquished the cpu_sheaves lock when obtaining
5585  * the empty sheaf, we need to handle some unlikely but possible cases.
5586  *
5587  * If we put any sheaf into the barn here, it's because we were interrupted
5588  * or have been migrated to a different cpu, which should be rare enough, so
5589  * just ignore the barn's limits to simplify the handling.
5590  *
5591  * An alternative scenario that gets us here is when we fail
5592  * barn_replace_full_sheaf(), because there's no empty sheaf available in the
5593  * barn, so we had to allocate it by alloc_empty_sheaf(). But because we saw the
5594  * limit on full sheaves was not exceeded, we assume it didn't change and just
5595  * put the full sheaf there.
5596  */
5597 static void __pcs_install_empty_sheaf(struct kmem_cache *s,
5598 		struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty,
5599 		struct node_barn *barn)
5600 {
5601 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
5602 
5603 	/* This is what we expect to find if nobody interrupted us. */
5604 	if (likely(!pcs->spare)) {
5605 		pcs->spare = pcs->main;
5606 		pcs->main = empty;
5607 		return;
5608 	}
5609 
5610 	/*
5611 	 * Unlikely because if the main sheaf had space, we would have just
5612 	 * freed to it. Get rid of our empty sheaf.
5613 	 */
5614 	if (pcs->main->size < s->sheaf_capacity) {
5615 		barn_put_empty_sheaf(barn, empty);
5616 		return;
5617 	}
5618 
5619 	/* Also unlikely for the same reason */
5620 	if (pcs->spare->size < s->sheaf_capacity) {
5621 		swap(pcs->main, pcs->spare);
5622 		barn_put_empty_sheaf(barn, empty);
5623 		return;
5624 	}
5625 
5626 	/*
5627 	 * We probably failed barn_replace_full_sheaf() due to no empty sheaf
5628 	 * available there, but we allocated one, so finish the job.
5629 	 */
5630 	barn_put_full_sheaf(barn, pcs->main);
5631 	stat(s, BARN_PUT);
5632 	pcs->main = empty;
5633 }
5634 
5635 /*
5636  * Replace the full main sheaf with an (at least partially) empty sheaf.
5637  *
5638  * Must be called with the cpu_sheaves local lock held. If successful, returns
5639  * the pcs pointer with the local lock held (possibly on a different cpu than
5640  * the one initially called on). If not successful, returns NULL with the local
5641  * lock released.
5642  */
5643 static struct slub_percpu_sheaves *
5644 __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
5645 			bool allow_spin)
5646 {
5647 	struct slab_sheaf *empty;
5648 	struct node_barn *barn;
5649 	bool put_fail;
5650 
5651 restart:
5652 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
5653 
5654 	/* Bootstrap or debug cache, back off */
5655 	if (unlikely(!cache_has_sheaves(s))) {
5656 		local_unlock(&s->cpu_sheaves->lock);
5657 		return NULL;
5658 	}
5659 
5660 	barn = get_barn(s);
5661 	if (!barn) {
5662 		local_unlock(&s->cpu_sheaves->lock);
5663 		return NULL;
5664 	}
5665 
5666 	put_fail = false;
5667 
5668 	if (!pcs->spare) {
5669 		empty = barn_get_empty_sheaf(barn, allow_spin);
5670 		if (empty) {
5671 			pcs->spare = pcs->main;
5672 			pcs->main = empty;
5673 			return pcs;
5674 		}
5675 		goto alloc_empty;
5676 	}
5677 
5678 	if (pcs->spare->size < s->sheaf_capacity) {
5679 		swap(pcs->main, pcs->spare);
5680 		return pcs;
5681 	}
5682 
5683 	empty = barn_replace_full_sheaf(barn, pcs->main, allow_spin);
5684 
5685 	if (!IS_ERR(empty)) {
5686 		stat(s, BARN_PUT);
5687 		pcs->main = empty;
5688 		return pcs;
5689 	}
5690 
5691 	/* sheaf_flush_unused() doesn't support !allow_spin */
5692 	if (PTR_ERR(empty) == -E2BIG && allow_spin) {
5693 		/* Since we got here, spare exists and is full */
5694 		struct slab_sheaf *to_flush = pcs->spare;
5695 
5696 		stat(s, BARN_PUT_FAIL);
5697 
5698 		pcs->spare = NULL;
5699 		local_unlock(&s->cpu_sheaves->lock);
5700 
5701 		sheaf_flush_unused(s, to_flush);
5702 		empty = to_flush;
5703 		goto got_empty;
5704 	}
5705 
5706 	/*
5707 	 * We could not replace full sheaf because barn had no empty
5708 	 * sheaves. We can still allocate it and put the full sheaf in
5709 	 * __pcs_install_empty_sheaf(), but if we fail to allocate it,
5710 	 * make sure to count the fail.
5711 	 */
5712 	put_fail = true;
5713 
5714 alloc_empty:
5715 	local_unlock(&s->cpu_sheaves->lock);
5716 
5717 	/*
5718 	 * alloc_empty_sheaf() doesn't support !allow_spin and it's
5719 	 * easier to fall back to freeing directly without sheaves
5720 	 * than add the support (and to sheaf_flush_unused() above)
5721 	 */
5722 	if (!allow_spin)
5723 		return NULL;
5724 
5725 	empty = alloc_empty_sheaf(s, GFP_NOWAIT);
5726 	if (empty)
5727 		goto got_empty;
5728 
5729 	if (put_fail)
5730 		 stat(s, BARN_PUT_FAIL);
5731 
5732 	if (!sheaf_try_flush_main(s))
5733 		return NULL;
5734 
5735 	if (!local_trylock(&s->cpu_sheaves->lock))
5736 		return NULL;
5737 
5738 	pcs = this_cpu_ptr(s->cpu_sheaves);
5739 
5740 	/*
5741 	 * we flushed the main sheaf so it should be empty now,
5742 	 * but in case we got preempted or migrated, we need to
5743 	 * check again
5744 	 */
5745 	if (pcs->main->size == s->sheaf_capacity)
5746 		goto restart;
5747 
5748 	return pcs;
5749 
5750 got_empty:
5751 	if (!local_trylock(&s->cpu_sheaves->lock)) {
5752 		barn_put_empty_sheaf(barn, empty);
5753 		return NULL;
5754 	}
5755 
5756 	pcs = this_cpu_ptr(s->cpu_sheaves);
5757 	__pcs_install_empty_sheaf(s, pcs, empty, barn);
5758 
5759 	return pcs;
5760 }
5761 
5762 /*
5763  * Free an object to the percpu sheaves.
5764  * The object is expected to have passed slab_free_hook() already.
5765  */
5766 static __fastpath_inline
5767 bool free_to_pcs(struct kmem_cache *s, void *object, bool allow_spin)
5768 {
5769 	struct slub_percpu_sheaves *pcs;
5770 
5771 	if (!local_trylock(&s->cpu_sheaves->lock))
5772 		return false;
5773 
5774 	pcs = this_cpu_ptr(s->cpu_sheaves);
5775 
5776 	if (unlikely(pcs->main->size == s->sheaf_capacity)) {
5777 
5778 		pcs = __pcs_replace_full_main(s, pcs, allow_spin);
5779 		if (unlikely(!pcs))
5780 			return false;
5781 	}
5782 
5783 	pcs->main->objects[pcs->main->size++] = object;
5784 
5785 	local_unlock(&s->cpu_sheaves->lock);
5786 
5787 	stat(s, FREE_FASTPATH);
5788 
5789 	return true;
5790 }
5791 
5792 static void rcu_free_sheaf(struct rcu_head *head)
5793 {
5794 	struct kmem_cache_node *n;
5795 	struct slab_sheaf *sheaf;
5796 	struct node_barn *barn = NULL;
5797 	struct kmem_cache *s;
5798 
5799 	sheaf = container_of(head, struct slab_sheaf, rcu_head);
5800 
5801 	s = sheaf->cache;
5802 
5803 	/*
5804 	 * This may remove some objects due to slab_free_hook() returning false,
5805 	 * so that the sheaf might no longer be completely full. But it's easier
5806 	 * to handle it as full (unless it became completely empty), as the code
5807 	 * handles it fine. The only downside is that sheaf will serve fewer
5808 	 * allocations when reused. It only happens due to debugging, which is a
5809 	 * performance hit anyway.
5810 	 *
5811 	 * If it returns true, there was at least one object from pfmemalloc
5812 	 * slab so simply flush everything.
5813 	 */
5814 	if (__rcu_free_sheaf_prepare(s, sheaf))
5815 		goto flush;
5816 
5817 	n = get_node(s, sheaf->node);
5818 	if (!n)
5819 		goto flush;
5820 
5821 	barn = n->barn;
5822 
5823 	/* due to slab_free_hook() */
5824 	if (unlikely(sheaf->size == 0))
5825 		goto empty;
5826 
5827 	/*
5828 	 * Checking nr_full/nr_empty outside the lock avoids contention in case the
5829 	 * barn is at the respective limit. Due to the race we might go over the
5830 	 * limit, but that should be rare and harmless.
5831 	 */
5832 
5833 	if (data_race(barn->nr_full) < MAX_FULL_SHEAVES) {
5834 		stat(s, BARN_PUT);
5835 		barn_put_full_sheaf(barn, sheaf);
5836 		return;
5837 	}
5838 
5839 flush:
5840 	stat(s, BARN_PUT_FAIL);
5841 	sheaf_flush_unused(s, sheaf);
5842 
5843 empty:
5844 	if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
5845 		barn_put_empty_sheaf(barn, sheaf);
5846 		return;
5847 	}
5848 
5849 	free_empty_sheaf(s, sheaf);
5850 }
5851 
5852 /*
5853  * kvfree_call_rcu() can be called while holding a raw_spinlock_t. Since
5854  * __kfree_rcu_sheaf() may acquire a spinlock_t (sleeping lock on PREEMPT_RT),
5855  * this would violate lock nesting rules. Therefore, kvfree_call_rcu() avoids
5856  * this problem by bypassing the sheaves layer entirely on PREEMPT_RT.
5857  *
5858  * However, lockdep still complains that it is invalid to acquire spinlock_t
5859  * while holding raw_spinlock_t, even on !PREEMPT_RT where spinlock_t is a
5860  * spinning lock. Tell lockdep that acquiring spinlock_t is valid here
5861  * by temporarily raising the wait-type to LD_WAIT_CONFIG.
5862  */
5863 static DEFINE_WAIT_OVERRIDE_MAP(kfree_rcu_sheaf_map, LD_WAIT_CONFIG);
5864 
5865 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
5866 {
5867 	struct slub_percpu_sheaves *pcs;
5868 	struct slab_sheaf *rcu_sheaf;
5869 
5870 	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
5871 		return false;
5872 
5873 	lock_map_acquire_try(&kfree_rcu_sheaf_map);
5874 
5875 	if (!local_trylock(&s->cpu_sheaves->lock))
5876 		goto fail;
5877 
5878 	pcs = this_cpu_ptr(s->cpu_sheaves);
5879 
5880 	if (unlikely(!pcs->rcu_free)) {
5881 
5882 		struct slab_sheaf *empty;
5883 		struct node_barn *barn;
5884 
5885 		/* Bootstrap or debug cache, fall back */
5886 		if (unlikely(!cache_has_sheaves(s))) {
5887 			local_unlock(&s->cpu_sheaves->lock);
5888 			goto fail;
5889 		}
5890 
5891 		if (pcs->spare && pcs->spare->size == 0) {
5892 			pcs->rcu_free = pcs->spare;
5893 			pcs->spare = NULL;
5894 			goto do_free;
5895 		}
5896 
5897 		barn = get_barn(s);
5898 		if (!barn) {
5899 			local_unlock(&s->cpu_sheaves->lock);
5900 			goto fail;
5901 		}
5902 
5903 		empty = barn_get_empty_sheaf(barn, true);
5904 
5905 		if (empty) {
5906 			pcs->rcu_free = empty;
5907 			goto do_free;
5908 		}
5909 
5910 		local_unlock(&s->cpu_sheaves->lock);
5911 
5912 		empty = alloc_empty_sheaf(s, GFP_NOWAIT);
5913 
5914 		if (!empty)
5915 			goto fail;
5916 
5917 		if (!local_trylock(&s->cpu_sheaves->lock)) {
5918 			barn_put_empty_sheaf(barn, empty);
5919 			goto fail;
5920 		}
5921 
5922 		pcs = this_cpu_ptr(s->cpu_sheaves);
5923 
5924 		if (unlikely(pcs->rcu_free))
5925 			barn_put_empty_sheaf(barn, empty);
5926 		else
5927 			pcs->rcu_free = empty;
5928 	}
5929 
5930 do_free:
5931 
5932 	rcu_sheaf = pcs->rcu_free;
5933 
5934 	/*
5935 	 * Since we flush immediately when size reaches capacity, we never reach
5936 	 * this with size already at capacity, so no OOB write is possible.
5937 	 */
5938 	rcu_sheaf->objects[rcu_sheaf->size++] = obj;
5939 
5940 	if (likely(rcu_sheaf->size < s->sheaf_capacity)) {
5941 		rcu_sheaf = NULL;
5942 	} else {
5943 		pcs->rcu_free = NULL;
5944 		rcu_sheaf->node = numa_mem_id();
5945 	}
5946 
5947 	/*
5948 	 * we flush before local_unlock to make sure a racing
5949 	 * flush_all_rcu_sheaves() doesn't miss this sheaf
5950 	 */
5951 	if (rcu_sheaf)
5952 		call_rcu(&rcu_sheaf->rcu_head, rcu_free_sheaf);
5953 
5954 	local_unlock(&s->cpu_sheaves->lock);
5955 
5956 	stat(s, FREE_RCU_SHEAF);
5957 	lock_map_release(&kfree_rcu_sheaf_map);
5958 	return true;
5959 
5960 fail:
5961 	stat(s, FREE_RCU_SHEAF_FAIL);
5962 	lock_map_release(&kfree_rcu_sheaf_map);
5963 	return false;
5964 }
5965 
5966 /*
5967  * Bulk free objects to the percpu sheaves.
5968  * Unlike free_to_pcs() this includes the calls to all necessary hooks
5969  * and the fallback to freeing to slab pages.
5970  */
5971 static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
5972 {
5973 	struct slub_percpu_sheaves *pcs;
5974 	struct slab_sheaf *main, *empty;
5975 	bool init = slab_want_init_on_free(s);
5976 	unsigned int batch, i = 0;
5977 	struct node_barn *barn;
5978 	void *remote_objects[PCS_BATCH_MAX];
5979 	unsigned int remote_nr = 0;
5980 	int node = numa_mem_id();
5981 
5982 next_remote_batch:
5983 	while (i < size) {
5984 		struct slab *slab = virt_to_slab(p[i]);
5985 
5986 		memcg_slab_free_hook(s, slab, p + i, 1);
5987 		alloc_tagging_slab_free_hook(s, slab, p + i, 1);
5988 
5989 		if (unlikely(!slab_free_hook(s, p[i], init, false))) {
5990 			p[i] = p[--size];
5991 			continue;
5992 		}
5993 
5994 		if (unlikely((IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)
5995 			     || slab_test_pfmemalloc(slab))) {
5996 			remote_objects[remote_nr] = p[i];
5997 			p[i] = p[--size];
5998 			if (++remote_nr >= PCS_BATCH_MAX)
5999 				goto flush_remote;
6000 			continue;
6001 		}
6002 
6003 		i++;
6004 	}
6005 
6006 	if (!size)
6007 		goto flush_remote;
6008 
6009 next_batch:
6010 	if (!local_trylock(&s->cpu_sheaves->lock))
6011 		goto fallback;
6012 
6013 	pcs = this_cpu_ptr(s->cpu_sheaves);
6014 
6015 	if (likely(pcs->main->size < s->sheaf_capacity))
6016 		goto do_free;
6017 
6018 	barn = get_barn(s);
6019 	if (!barn)
6020 		goto no_empty;
6021 
6022 	if (!pcs->spare) {
6023 		empty = barn_get_empty_sheaf(barn, true);
6024 		if (!empty)
6025 			goto no_empty;
6026 
6027 		pcs->spare = pcs->main;
6028 		pcs->main = empty;
6029 		goto do_free;
6030 	}
6031 
6032 	if (pcs->spare->size < s->sheaf_capacity) {
6033 		swap(pcs->main, pcs->spare);
6034 		goto do_free;
6035 	}
6036 
6037 	empty = barn_replace_full_sheaf(barn, pcs->main, true);
6038 	if (IS_ERR(empty)) {
6039 		stat(s, BARN_PUT_FAIL);
6040 		goto no_empty;
6041 	}
6042 
6043 	stat(s, BARN_PUT);
6044 	pcs->main = empty;
6045 
6046 do_free:
6047 	main = pcs->main;
6048 	batch = min(size, s->sheaf_capacity - main->size);
6049 
6050 	memcpy(main->objects + main->size, p, batch * sizeof(void *));
6051 	main->size += batch;
6052 
6053 	local_unlock(&s->cpu_sheaves->lock);
6054 
6055 	stat_add(s, FREE_FASTPATH, batch);
6056 
6057 	if (batch < size) {
6058 		p += batch;
6059 		size -= batch;
6060 		goto next_batch;
6061 	}
6062 
6063 	if (remote_nr)
6064 		goto flush_remote;
6065 
6066 	return;
6067 
6068 no_empty:
6069 	local_unlock(&s->cpu_sheaves->lock);
6070 
6071 	/*
6072 	 * if we depleted all empty sheaves in the barn or there are too
6073 	 * many full sheaves, free the rest to slab pages
6074 	 */
6075 fallback:
6076 	__kmem_cache_free_bulk(s, size, p);
6077 	stat_add(s, FREE_SLOWPATH, size);
6078 
6079 flush_remote:
6080 	if (remote_nr) {
6081 		__kmem_cache_free_bulk(s, remote_nr, &remote_objects[0]);
6082 		stat_add(s, FREE_SLOWPATH, remote_nr);
6083 		if (i < size) {
6084 			remote_nr = 0;
6085 			goto next_remote_batch;
6086 		}
6087 	}
6088 }
6089 
6090 struct defer_free {
6091 	struct llist_head objects;
6092 	struct irq_work work;
6093 };
6094 
6095 static void free_deferred_objects(struct irq_work *work);
6096 
6097 static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = {
6098 	.objects = LLIST_HEAD_INIT(objects),
6099 	.work = IRQ_WORK_INIT(free_deferred_objects),
6100 };
6101 
6102 /*
6103  * In PREEMPT_RT, irq_work runs in a per-cpu kthread, so it's safe
6104  * to take sleeping spin_locks from __slab_free().
6105  * In !PREEMPT_RT, irq_work will run after local_unlock_irqrestore().
6106  */
6107 static void free_deferred_objects(struct irq_work *work)
6108 {
6109 	struct defer_free *df = container_of(work, struct defer_free, work);
6110 	struct llist_head *objs = &df->objects;
6111 	struct llist_node *llnode, *pos, *t;
6112 
6113 	if (llist_empty(objs))
6114 		return;
6115 
6116 	llnode = llist_del_all(objs);
6117 	llist_for_each_safe(pos, t, llnode) {
6118 		struct kmem_cache *s;
6119 		struct slab *slab;
6120 		void *x = pos;
6121 
6122 		slab = virt_to_slab(x);
6123 		s = slab->slab_cache;
6124 
6125 		/* Point 'x' back to the beginning of allocated object */
6126 		x -= s->offset;
6127 
6128 		/*
6129 		 * We used freepointer in 'x' to link 'x' into df->objects.
6130 		 * Clear it to NULL to avoid false positive detection
6131 		 * of "Freepointer corruption".
6132 		 */
6133 		set_freepointer(s, x, NULL);
6134 
6135 		__slab_free(s, slab, x, x, 1, _THIS_IP_);
6136 		stat(s, FREE_SLOWPATH);
6137 	}
6138 }
6139 
6140 static void defer_free(struct kmem_cache *s, void *head)
6141 {
6142 	struct defer_free *df;
6143 
6144 	guard(preempt)();
6145 
6146 	head = kasan_reset_tag(head);
6147 
6148 	df = this_cpu_ptr(&defer_free_objects);
6149 	if (llist_add(head + s->offset, &df->objects))
6150 		irq_work_queue(&df->work);
6151 }
6152 
6153 void defer_free_barrier(void)
6154 {
6155 	int cpu;
6156 
6157 	for_each_possible_cpu(cpu)
6158 		irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
6159 }
6160 
6161 static __fastpath_inline
6162 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
6163 	       unsigned long addr)
6164 {
6165 	memcg_slab_free_hook(s, slab, &object, 1);
6166 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
6167 
6168 	if (unlikely(!slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6169 		return;
6170 
6171 	if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())
6172 	    && likely(!slab_test_pfmemalloc(slab))) {
6173 		if (likely(free_to_pcs(s, object, true)))
6174 			return;
6175 	}
6176 
6177 	__slab_free(s, slab, object, object, 1, addr);
6178 	stat(s, FREE_SLOWPATH);
6179 }
6180 
6181 #ifdef CONFIG_MEMCG
6182 /* Do not inline the rare memcg charging failed path into the allocation path */
6183 static noinline
6184 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
6185 {
6186 	struct slab *slab = virt_to_slab(object);
6187 
6188 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
6189 
6190 	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6191 		__slab_free(s, slab, object, object, 1, _RET_IP_);
6192 }
6193 #endif
6194 
6195 static __fastpath_inline
6196 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
6197 		    void *tail, void **p, int cnt, unsigned long addr)
6198 {
6199 	memcg_slab_free_hook(s, slab, p, cnt);
6200 	alloc_tagging_slab_free_hook(s, slab, p, cnt);
6201 	/*
6202 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
6203 	 * to remove objects, whose reuse must be delayed.
6204 	 */
6205 	if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) {
6206 		__slab_free(s, slab, head, tail, cnt, addr);
6207 		stat_add(s, FREE_SLOWPATH, cnt);
6208 	}
6209 }
6210 
6211 #ifdef CONFIG_SLUB_RCU_DEBUG
6212 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
6213 {
6214 	struct rcu_delayed_free *delayed_free =
6215 			container_of(rcu_head, struct rcu_delayed_free, head);
6216 	void *object = delayed_free->object;
6217 	struct slab *slab = virt_to_slab(object);
6218 	struct kmem_cache *s;
6219 
6220 	kfree(delayed_free);
6221 
6222 	if (WARN_ON(is_kfence_address(object)))
6223 		return;
6224 
6225 	/* find the object and the cache again */
6226 	if (WARN_ON(!slab))
6227 		return;
6228 	s = slab->slab_cache;
6229 	if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
6230 		return;
6231 
6232 	/* resume freeing */
6233 	if (slab_free_hook(s, object, slab_want_init_on_free(s), true)) {
6234 		__slab_free(s, slab, object, object, 1, _THIS_IP_);
6235 		stat(s, FREE_SLOWPATH);
6236 	}
6237 }
6238 #endif /* CONFIG_SLUB_RCU_DEBUG */
6239 
6240 #ifdef CONFIG_KASAN_GENERIC
6241 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
6242 {
6243 	__slab_free(cache, virt_to_slab(x), x, x, 1, addr);
6244 	stat(cache, FREE_SLOWPATH);
6245 }
6246 #endif
6247 
6248 static noinline void warn_free_bad_obj(struct kmem_cache *s, void *obj)
6249 {
6250 	struct kmem_cache *cachep;
6251 	struct slab *slab;
6252 
6253 	slab = virt_to_slab(obj);
6254 	if (WARN_ONCE(!slab,
6255 			"kmem_cache_free(%s, %p): object is not in a slab page\n",
6256 			s->name, obj))
6257 		return;
6258 
6259 	cachep = slab->slab_cache;
6260 
6261 	if (WARN_ONCE(cachep != s,
6262 			"kmem_cache_free(%s, %p): object belongs to different cache %s\n",
6263 			s->name, obj, cachep ? cachep->name : "(NULL)")) {
6264 		if (cachep)
6265 			print_tracking(cachep, obj);
6266 		return;
6267 	}
6268 }
6269 
6270 /**
6271  * kmem_cache_free - Deallocate an object
6272  * @s: The cache the allocation was from.
6273  * @x: The previously allocated object.
6274  *
6275  * Free an object which was previously allocated from this
6276  * cache.
6277  */
6278 void kmem_cache_free(struct kmem_cache *s, void *x)
6279 {
6280 	struct slab *slab;
6281 
6282 	slab = virt_to_slab(x);
6283 
6284 	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) ||
6285 	    kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
6286 
6287 		/*
6288 		 * Intentionally leak the object in these cases, because it
6289 		 * would be too dangerous to continue.
6290 		 */
6291 		if (unlikely(!slab || (slab->slab_cache != s))) {
6292 			warn_free_bad_obj(s, x);
6293 			return;
6294 		}
6295 	}
6296 
6297 	trace_kmem_cache_free(_RET_IP_, x, s);
6298 	slab_free(s, slab, x, _RET_IP_);
6299 }
6300 EXPORT_SYMBOL(kmem_cache_free);
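
/*
 * Minimal usage sketch (illustrative only; the cache and object names are
 * made up): objects allocated from a dedicated cache are returned to that
 * same cache:
 *
 *	s = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *			      SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(s, GFP_KERNEL);
 *	...
 *	kmem_cache_free(s, obj);
 */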
6301 
6302 static inline size_t slab_ksize(struct slab *slab)
6303 {
6304 	struct kmem_cache *s = slab->slab_cache;
6305 
6306 #ifdef CONFIG_SLUB_DEBUG
6307 	/*
6308 	 * Debugging requires use of the padding between object
6309 	 * and whatever may come after it.
6310 	 */
6311 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
6312 		return s->object_size;
6313 #endif
6314 	if (s->flags & SLAB_KASAN)
6315 		return s->object_size;
6316 	/*
6317 	 * If we have the need to store the freelist pointer
6318 	 * or any other metadata back there then we can
6319 	 * only use the space before that information.
6320 	 */
6321 	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
6322 		return s->inuse;
6323 	else if (obj_exts_in_object(s, slab))
6324 		return s->inuse;
6325 	/*
6326 	 * Else we can use all the padding etc for the allocation
6327 	 */
6328 	return s->size;
6329 }
6330 
6331 static size_t __ksize(const void *object)
6332 {
6333 	struct page *page;
6334 	struct slab *slab;
6335 
6336 	if (unlikely(object == ZERO_SIZE_PTR))
6337 		return 0;
6338 
6339 	page = virt_to_page(object);
6340 
6341 	if (unlikely(PageLargeKmalloc(page)))
6342 		return large_kmalloc_size(page);
6343 
6344 	slab = page_slab(page);
6345 	/* Delete this after we're sure there are no users */
6346 	if (WARN_ON(!slab))
6347 		return page_size(page);
6348 
6349 #ifdef CONFIG_SLUB_DEBUG
6350 	skip_orig_size_check(slab->slab_cache, object);
6351 #endif
6352 
6353 	return slab_ksize(slab);
6354 }
6355 
6356 /**
6357  * ksize -- Report full size of underlying allocation
6358  * @objp: pointer to the object
6359  *
6360  * This should only be used internally to query the true size of allocations.
6361  * It is not meant to be a way to discover the usable size of an allocation
6362  * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
6363  * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
6364  * and/or FORTIFY_SOURCE.
6365  *
6366  * Return: size of the actual memory used by @objp in bytes
6367  */
6368 size_t ksize(const void *objp)
6369 {
6370 	/*
6371 	 * We need to first check that the pointer to the object is valid.
6372 	 * The KASAN report printed from ksize() is more useful than one
6373 	 * printed later, when the behaviour could be undefined due to
6374 	 * a potential use-after-free or double-free.
6375 	 *
6376 	 * We use kasan_check_byte(), which is supported for the hardware
6377 	 * tag-based KASAN mode, unlike kasan_check_read/write().
6378 	 *
6379 	 * If the pointed to memory is invalid, we return 0 to avoid users of
6380 	 * ksize() writing to and potentially corrupting the memory region.
6381 	 *
6382 	 * We want to perform the check before __ksize(), to avoid potentially
6383 	 * crashing in __ksize() due to accessing invalid metadata.
6384 	 */
6385 	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
6386 		return 0;
6387 
6388 	return kfence_ksize(objp) ?: __ksize(objp);
6389 }
6390 EXPORT_SYMBOL(ksize);
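
/*
 * Illustrative sketch of the guidance above: code that wants to make use of
 * the whole rounded-up kmalloc bucket should size the request with
 * kmalloc_size_roundup() up front instead of querying ksize() afterwards
 * ("len" and "buf" are made-up names). All alloc_len bytes may then be used
 * without tripping KASAN, UBSAN_BOUNDS or FORTIFY_SOURCE:
 *
 *	size_t alloc_len = kmalloc_size_roundup(len);
 *	char *buf = kmalloc(alloc_len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */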
6391 
6392 static void free_large_kmalloc(struct page *page, void *object)
6393 {
6394 	unsigned int order = compound_order(page);
6395 
6396 	if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
6397 		dump_page(page, "Not a kmalloc allocation");
6398 		return;
6399 	}
6400 
6401 	if (WARN_ON_ONCE(order == 0))
6402 		pr_warn_once("object pointer: 0x%p\n", object);
6403 
6404 	kmemleak_free(object);
6405 	kasan_kfree_large(object);
6406 	kmsan_kfree_large(object);
6407 
6408 	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
6409 			      -(PAGE_SIZE << order));
6410 	__ClearPageLargeKmalloc(page);
6411 	free_frozen_pages(page, order);
6412 }
6413 
6414 /*
6415  * Given an rcu_head embedded within an object obtained from kvmalloc at an
6416  * offset < 4k, free the object in question.
6417  */
6418 void kvfree_rcu_cb(struct rcu_head *head)
6419 {
6420 	void *obj = head;
6421 	struct page *page;
6422 	struct slab *slab;
6423 	struct kmem_cache *s;
6424 	void *slab_addr;
6425 
6426 	if (is_vmalloc_addr(obj)) {
6427 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6428 		vfree(obj);
6429 		return;
6430 	}
6431 
6432 	page = virt_to_page(obj);
6433 	slab = page_slab(page);
6434 	if (!slab) {
6435 		/*
6436 		 * the rcu_head offset can only be less than the page size, so there
6437 		 * is no need to consider the allocation order
6438 		 */
6439 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6440 		free_large_kmalloc(page, obj);
6441 		return;
6442 	}
6443 
6444 	s = slab->slab_cache;
6445 	slab_addr = slab_address(slab);
6446 
6447 	if (is_kfence_address(obj)) {
6448 		obj = kfence_object_start(obj);
6449 	} else {
6450 		unsigned int idx = __obj_to_index(s, slab_addr, obj);
6451 
6452 		obj = slab_addr + s->size * idx;
6453 		obj = fixup_red_left(s, obj);
6454 	}
6455 
6456 	slab_free(s, slab, obj, _RET_IP_);
6457 }
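
/*
 * Illustrative sketch of the caller side that ends up here: an object
 * allocated with kvmalloc() embeds a struct rcu_head at an offset below 4k
 * and is freed with the two-argument form of kvfree_rcu(), which eventually
 * invokes kvfree_rcu_cb() with a pointer to that rcu_head ("struct foo" is
 * a made-up name):
 *
 *	struct foo {
 *		unsigned long data[16];
 *		struct rcu_head rcu;
 *	};
 *
 *	struct foo *p = kvmalloc(sizeof(*p), GFP_KERNEL);
 *	...
 *	kvfree_rcu(p, rcu);
 */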
6458 
6459 /**
6460  * kfree - free previously allocated memory
6461  * @object: pointer returned by kmalloc(), kmalloc_nolock(), or kmem_cache_alloc()
6462  *
6463  * If @object is NULL, no operation is performed.
6464  */
6465 void kfree(const void *object)
6466 {
6467 	struct page *page;
6468 	struct slab *slab;
6469 	struct kmem_cache *s;
6470 	void *x = (void *)object;
6471 
6472 	trace_kfree(_RET_IP_, object);
6473 
6474 	if (unlikely(ZERO_OR_NULL_PTR(object)))
6475 		return;
6476 
6477 	page = virt_to_page(object);
6478 	slab = page_slab(page);
6479 	if (!slab) {
6480 		/* kmalloc_nolock() doesn't support large kmalloc */
6481 		free_large_kmalloc(page, (void *)object);
6482 		return;
6483 	}
6484 
6485 	s = slab->slab_cache;
6486 	slab_free(s, slab, x, _RET_IP_);
6487 }
6488 EXPORT_SYMBOL(kfree);
6489 
6490 /*
6491  * Can be called while holding raw_spinlock_t or from IRQ and NMI,
6492  * but ONLY for objects allocated by kmalloc_nolock().
6493  * Debug checks (like kmemleak and kfence) were skipped on allocation,
6494  * hence
6495  * obj = kmalloc(); kfree_nolock(obj);
6496  * will miss kmemleak/kfence bookkeeping and will cause false positives.
6497  * large_kmalloc is not supported either.
6498  */
6499 void kfree_nolock(const void *object)
6500 {
6501 	struct slab *slab;
6502 	struct kmem_cache *s;
6503 	void *x = (void *)object;
6504 
6505 	if (unlikely(ZERO_OR_NULL_PTR(object)))
6506 		return;
6507 
6508 	slab = virt_to_slab(object);
6509 	if (unlikely(!slab)) {
6510 		WARN_ONCE(1, "large_kmalloc is not supported by kfree_nolock()");
6511 		return;
6512 	}
6513 
6514 	s = slab->slab_cache;
6515 
6516 	memcg_slab_free_hook(s, slab, &x, 1);
6517 	alloc_tagging_slab_free_hook(s, slab, &x, 1);
6518 	/*
6519 	 * Unlike slab_free() do NOT call the following:
6520 	 * kmemleak_free_recursive(x, s->flags);
6521 	 * debug_check_no_locks_freed(x, s->object_size);
6522 	 * debug_check_no_obj_freed(x, s->object_size);
6523 	 * __kcsan_check_access(x, s->object_size, ..);
6524 	 * kfence_free(x);
6525 	 * since they take spinlocks or not safe from any context.
6526 	 */
6527 	kmsan_slab_free(s, x);
6528 	/*
6529 	 * If KASAN finds a kernel bug it will do kasan_report_invalid_free()
6530 	 * which will call raw_spin_lock_irqsave() which is technically
6531 	 * unsafe from NMI, but take chance and report kernel bug.
6532 	 * The sequence of
6533 	 * kasan_report_invalid_free() -> raw_spin_lock_irqsave() -> NMI
6534 	 *  -> kfree_nolock() -> kasan_report_invalid_free() on the same CPU
6535 	 * is double buggy and deserves to deadlock.
6536 	 */
6537 	if (kasan_slab_pre_free(s, x))
6538 		return;
6539 	/*
6540 	 * memcg, kasan_slab_pre_free are done for 'x'.
6541 	 * The only thing left is kasan_poison without quarantine,
6542 	 * since the kasan quarantine takes locks and is not supported from NMI.
6543 	 */
6544 	kasan_slab_free(s, x, false, false, /* skip quarantine */true);
6545 
6546 	if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())) {
6547 		if (likely(free_to_pcs(s, x, false)))
6548 			return;
6549 	}
6550 
6551 	/*
6552 	 * __slab_free() can locklessly cmpxchg16 into a slab, but then it might
6553 	 * need to take spin_lock for further processing.
6554 	 * Avoid the complexity and simply add to a deferred list.
6555 	 */
6556 	defer_free(s, x);
6557 }
6558 EXPORT_SYMBOL_GPL(kfree_nolock);
6559 
6560 static __always_inline __realloc_size(2) void *
6561 __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags, int nid)
6562 {
6563 	void *ret;
6564 	size_t ks = 0;
6565 	int orig_size = 0;
6566 	struct kmem_cache *s = NULL;
6567 
6568 	if (unlikely(ZERO_OR_NULL_PTR(p)))
6569 		goto alloc_new;
6570 
6571 	/* Check for double-free. */
6572 	if (!kasan_check_byte(p))
6573 		return NULL;
6574 
6575 	/*
6576 	 * If reallocation is not necessary (e.g. the new size is less
6577 	 * than the current allocated size), the current allocation will be
6578 	 * preserved unless __GFP_THISNODE is set. In the latter case a new
6579 	 * allocation on the requested node will be attempted.
6580 	 */
6581 	if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
6582 		     nid != page_to_nid(virt_to_page(p)))
6583 		goto alloc_new;
6584 
6585 	if (is_kfence_address(p)) {
6586 		ks = orig_size = kfence_ksize(p);
6587 	} else {
6588 		struct page *page = virt_to_page(p);
6589 		struct slab *slab = page_slab(page);
6590 
6591 		if (!slab) {
6592 			/* Big kmalloc object */
6593 			ks = page_size(page);
6594 			WARN_ON(ks <= KMALLOC_MAX_CACHE_SIZE);
6595 			WARN_ON(p != page_address(page));
6596 		} else {
6597 			s = slab->slab_cache;
6598 			orig_size = get_orig_size(s, (void *)p);
6599 			ks = s->object_size;
6600 		}
6601 	}
6602 
6603 	/* If the old object doesn't fit, allocate a bigger one */
6604 	if (new_size > ks)
6605 		goto alloc_new;
6606 
6607 	/* If the old object doesn't satisfy the new alignment, allocate a new one */
6608 	if (!IS_ALIGNED((unsigned long)p, align))
6609 		goto alloc_new;
6610 
6611 	/* Zero out spare memory. */
6612 	if (want_init_on_alloc(flags)) {
6613 		kasan_disable_current();
6614 		if (orig_size && orig_size < new_size)
6615 			memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size);
6616 		else
6617 			memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
6618 		kasan_enable_current();
6619 	}
6620 
6621 	/* Setup kmalloc redzone when needed */
6622 	if (s && slub_debug_orig_size(s)) {
6623 		set_orig_size(s, (void *)p, new_size);
6624 		if (s->flags & SLAB_RED_ZONE && new_size < ks)
6625 			memset_no_sanitize_memory(kasan_reset_tag(p) + new_size,
6626 						SLUB_RED_ACTIVE, ks - new_size);
6627 	}
6628 
6629 	p = kasan_krealloc(p, new_size, flags);
6630 	return (void *)p;
6631 
6632 alloc_new:
6633 	ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
6634 	if (ret && p) {
6635 		/* Disable KASAN checks as the object's redzone is accessed. */
6636 		kasan_disable_current();
6637 		memcpy(ret, kasan_reset_tag(p), orig_size ?: ks);
6638 		kasan_enable_current();
6639 	}
6640 
6641 	return ret;
6642 }
6643 
6644 /**
6645  * krealloc_node_align - reallocate memory. The contents will remain unchanged.
6646  * @p: object to reallocate memory for.
6647  * @new_size: how many bytes of memory are required.
6648  * @align: desired alignment.
6649  * @flags: the type of memory to allocate.
6650  * @nid: NUMA node or NUMA_NO_NODE
6651  *
6652  * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
6653  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
6654  *
6655  * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6656  * Documentation/core-api/memory-allocation.rst for more details.
6657  *
6658  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
6659  * initial memory allocation, every subsequent call to this API for the same
6660  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
6661  * __GFP_ZERO is not fully honored by this API.
6662  *
6663  * When slub_debug_orig_size() is off, krealloc() only knows about the bucket
6664  * size of an allocation (but not the exact size it was allocated with) and
6665  * hence implements the following semantics for shrinking and growing buffers
6666  * with __GFP_ZERO::
6667  *
6668  *           new             bucket
6669  *   0       size             size
6670  *   |--------|----------------|
6671  *   |  keep  |      zero      |
6672  *
6673  * Otherwise, the original allocation size 'orig_size' could be used to
6674  * precisely clear the requested size, and the new size will also be stored
6675  * as the new 'orig_size'.
6676  *
6677  * In any case, the contents of the object pointed to are preserved up to the
6678  * lesser of the new and old sizes.
6679  *
6680  * Return: pointer to the allocated memory or %NULL in case of error
6681  */
6682 void *krealloc_node_align_noprof(const void *p, size_t new_size, unsigned long align,
6683 				 gfp_t flags, int nid)
6684 {
6685 	void *ret;
6686 
6687 	if (unlikely(!new_size)) {
6688 		kfree(p);
6689 		return ZERO_SIZE_PTR;
6690 	}
6691 
6692 	ret = __do_krealloc(p, new_size, align, flags, nid);
6693 	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
6694 		kfree(p);
6695 
6696 	return ret;
6697 }
6698 EXPORT_SYMBOL(krealloc_node_align_noprof);
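
/*
 * Illustrative sketch of the __GFP_ZERO contract documented above (sizes are
 * arbitrary): as long as every call in the allocation's lifetime passes
 * __GFP_ZERO, bytes beyond the requested size read as zero after each step.
 * krealloc() stands for the usual wrapper of the function above:
 *
 *	p = kmalloc(56, GFP_KERNEL | __GFP_ZERO);
 *	...
 *	p = krealloc(p, 120, GFP_KERNEL | __GFP_ZERO);
 *	if (!p)
 *		return -ENOMEM;
 */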
6699 
6700 static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
6701 {
6702 	/*
6703 	 * We want to attempt a large physically contiguous block first because
6704 	 * it is less likely to fragment multiple larger blocks, and therefore
6705 	 * contributes less to long term fragmentation than the vmalloc fallback.
6706 	 * However, make sure that larger requests are not too disruptive - i.e.
6707 	 * do not perform direct reclaim unless physically contiguous memory is
6708 	 * preferred (__GFP_RETRY_MAYFAIL mode). We still kick kswapd/kcompactd
6709 	 * to start working in the background.
6710 	 */
6711 	if (size > PAGE_SIZE) {
6712 		flags |= __GFP_NOWARN;
6713 
6714 		if (!(flags & __GFP_RETRY_MAYFAIL))
6715 			flags &= ~__GFP_DIRECT_RECLAIM;
6716 
6717 		/* nofail semantic is implemented by the vmalloc fallback */
6718 		flags &= ~__GFP_NOFAIL;
6719 	}
6720 
6721 	return flags;
6722 }
6723 
6724 /**
6725  * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
6726  * failure, fall back to non-contiguous (vmalloc) allocation.
6727  * @size: size of the request.
6728  * @b: which set of kmalloc buckets to allocate from.
6729  * @align: desired alignment.
6730  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
6731  * @node: numa node to allocate from
6732  *
6733  * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6734  * Documentation/core-api/memory-allocation.rst for more details.
6735  *
6736  * Uses kmalloc to get the memory but if the allocation fails then falls back
6737  * to the vmalloc allocator. Use kvfree for freeing the memory.
6738  *
6739  * GFP_NOWAIT and GFP_ATOMIC are supported, the __GFP_NORETRY modifier is not.
6740  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
6741  * preferable to the vmalloc fallback, due to visible performance drawbacks.
6742  *
6743  * Return: pointer to the allocated memory or %NULL in case of failure
6744  */
6745 void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
6746 			     gfp_t flags, int node)
6747 {
6748 	bool allow_block;
6749 	void *ret;
6750 
6751 	/*
6752 	 * It doesn't really make sense to fall back to vmalloc for sub-page
6753 	 * requests
6754 	 */
6755 	ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),
6756 				kmalloc_gfp_adjust(flags, size),
6757 				node, _RET_IP_);
6758 	if (ret || size <= PAGE_SIZE)
6759 		return ret;
6760 
6761 	/* Don't even allow crazy sizes */
6762 	if (unlikely(size > INT_MAX)) {
6763 		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
6764 		return NULL;
6765 	}
6766 
6767 	/*
6768 	 * For non-blocking the VM_ALLOW_HUGE_VMAP is not used
6769 	 * because the huge-mapping path in vmalloc contains at
6770 	 * least one might_sleep() call.
6771 	 *
6772 	 * TODO: Revise huge-mapping path to support non-blocking
6773 	 * flags.
6774 	 */
6775 	allow_block = gfpflags_allow_blocking(flags);
6776 
6777 	/*
6778 	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
6779 	 * since the callers already cannot assume anything
6780 	 * about the resulting pointer, and cannot play
6781 	 * protection games.
6782 	 */
6783 	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
6784 			flags, PAGE_KERNEL, allow_block ? VM_ALLOW_HUGE_VMAP:0,
6785 			node, __builtin_return_address(0));
6786 }
6787 EXPORT_SYMBOL(__kvmalloc_node_noprof);
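
/*
 * Illustrative usage sketch: a caller needing a possibly large buffer that
 * does not have to be physically contiguous uses the kvmalloc_array() and
 * kvfree() wrappers and never needs to know whether kmalloc or vmalloc ended
 * up backing the allocation ("table" and "nr_entries" are made-up names):
 *
 *	table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */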
6788 
6789 /**
6790  * kvfree() - Free memory.
6791  * @addr: Pointer to allocated memory.
6792  *
6793  * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
6794  * It is slightly more efficient to use kfree() or vfree() if you are certain
6795  * that you know which one to use.
6796  *
6797  * Context: Either preemptible task context or not-NMI interrupt.
6798  */
6799 void kvfree(const void *addr)
6800 {
6801 	if (is_vmalloc_addr(addr))
6802 		vfree(addr);
6803 	else
6804 		kfree(addr);
6805 }
6806 EXPORT_SYMBOL(kvfree);
6807 
6808 /**
6809  * kvfree_sensitive - Free a data object containing sensitive information.
6810  * @addr: address of the data object to be freed.
6811  * @len: length of the data object.
6812  *
6813  * Use the special memzero_explicit() function to clear the content of a
6814  * kvmalloc'ed object containing sensitive data to make sure that the
6815  * compiler won't optimize out the data clearing.
6816  */
6817 void kvfree_sensitive(const void *addr, size_t len)
6818 {
6819 	if (likely(!ZERO_OR_NULL_PTR(addr))) {
6820 		memzero_explicit((void *)addr, len);
6821 		kvfree(addr);
6822 	}
6823 }
6824 EXPORT_SYMBOL(kvfree_sensitive);
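
/*
 * Illustrative sketch (key-handling code is a typical user; the names are
 * made up): the buffer holding secret material is cleared with
 * memzero_explicit() before being freed, so the compiler cannot optimize
 * the clearing away:
 *
 *	key = kvmalloc(key_len, GFP_KERNEL);
 *	...
 *	kvfree_sensitive(key, key_len);
 */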
6825 
6826 /**
6827  * kvrealloc_node_align - reallocate memory; contents remain unchanged
6828  * @p: object to reallocate memory for
6829  * @size: the size to reallocate
6830  * @align: desired alignment
6831  * @flags: the flags for the page level allocator
6832  * @nid: NUMA node id
6833  *
6834  * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
6835  * and @p is not a %NULL pointer, the object pointed to is freed.
6836  *
6837  * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
6838  * Documentation/core-api/memory-allocation.rst for more details.
6839  *
6840  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
6841  * initial memory allocation, every subsequent call to this API for the same
6842  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
6843  * __GFP_ZERO is not fully honored by this API.
6844  *
6845  * In any case, the contents of the object pointed to are preserved up to the
6846  * lesser of the new and old sizes.
6847  *
6848  * This function must not be called concurrently with itself or kvfree() for the
6849  * same memory allocation.
6850  *
6851  * Return: pointer to the allocated memory or %NULL in case of error
6852  */
6853 void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
6854 				  gfp_t flags, int nid)
6855 {
6856 	void *n;
6857 
6858 	if (is_vmalloc_addr(p))
6859 		return vrealloc_node_align_noprof(p, size, align, flags, nid);
6860 
6861 	n = krealloc_node_align_noprof(p, size, align, kmalloc_gfp_adjust(flags, size), nid);
6862 	if (!n) {
6863 		/* We failed to krealloc(), fall back to kvmalloc(). */
6864 		n = kvmalloc_node_align_noprof(size, align, flags, nid);
6865 		if (!n)
6866 			return NULL;
6867 
6868 		if (p) {
6869 			/* We already know that `p` is not a vmalloc address. */
6870 			kasan_disable_current();
6871 			memcpy(n, kasan_reset_tag(p), ksize(p));
6872 			kasan_enable_current();
6873 
6874 			kfree(p);
6875 		}
6876 	}
6877 
6878 	return n;
6879 }
6880 EXPORT_SYMBOL(kvrealloc_node_align_noprof);
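
/*
 * Illustrative sketch of the __GFP_ZERO rule documented above (sizes are
 * hypothetical, using the kvzalloc()/kvrealloc() wrappers): if the initial
 * allocation was zeroed, every later grow of the same buffer must also pass
 * __GFP_ZERO, otherwise the newly extended tail may contain stale data:
 *
 *	p = kvzalloc(old_size, GFP_KERNEL);
 *	...
 *	p = kvrealloc(p, new_size, GFP_KERNEL | __GFP_ZERO);
 */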
6881 
6882 struct detached_freelist {
6883 	struct slab *slab;
6884 	void *tail;
6885 	void *freelist;
6886 	int cnt;
6887 	struct kmem_cache *s;
6888 };
6889 
6890 /*
6891  * This function progressively scans the array with free objects (with
6892  * a limited look ahead) and extracts objects belonging to the same
6893  * slab.  It builds a detached freelist directly within the given
6894  * slab/objects.  This can happen without any need for
6895  * synchronization, because the objects are owned by the running process.
6896  * The freelist is built up as a singly linked list in the objects.
6897  * The idea is that this detached freelist can then be bulk
6898  * transferred to the real freelist(s), while only requiring a single
6899  * synchronization primitive.  Look ahead in the array is limited for
6900  * performance reasons.
6901  */
6902 static inline
6903 int build_detached_freelist(struct kmem_cache *s, size_t size,
6904 			    void **p, struct detached_freelist *df)
6905 {
6906 	int lookahead = 3;
6907 	void *object;
6908 	struct page *page;
6909 	struct slab *slab;
6910 	size_t same;
6911 
6912 	object = p[--size];
6913 	page = virt_to_page(object);
6914 	slab = page_slab(page);
6915 	if (!s) {
6916 		/* Handle kmalloc'ed objects */
6917 		if (!slab) {
6918 			free_large_kmalloc(page, object);
6919 			df->slab = NULL;
6920 			return size;
6921 		}
6922 		/* Derive kmem_cache from object */
6923 		df->slab = slab;
6924 		df->s = slab->slab_cache;
6925 	} else {
6926 		df->slab = slab;
6927 		df->s = s;
6928 	}
6929 
6930 	/* Start new detached freelist */
6931 	df->tail = object;
6932 	df->freelist = object;
6933 	df->cnt = 1;
6934 
6935 	if (is_kfence_address(object))
6936 		return size;
6937 
6938 	set_freepointer(df->s, object, NULL);
6939 
6940 	same = size;
6941 	while (size) {
6942 		object = p[--size];
6943 		/* df->slab is always set at this point */
6944 		if (df->slab == virt_to_slab(object)) {
6945 			/* Opportunistically build freelist */
6946 			set_freepointer(df->s, object, df->freelist);
6947 			df->freelist = object;
6948 			df->cnt++;
6949 			same--;
6950 			if (size != same)
6951 				swap(p[size], p[same]);
6952 			continue;
6953 		}
6954 
6955 		/* Limit look ahead search */
6956 		if (!--lookahead)
6957 			break;
6958 	}
6959 
6960 	return same;
6961 }
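
/*
 * Worked example (hypothetical objects): for p[] = { A1, B1, A2, A3, B2 },
 * where the An live on slab A and the Bn on slab B, a single call starts
 * from the end with B2, skips A3 and A2 (spending lookahead), links B1 into
 * the detached freelist and swaps it towards the end of the array, then
 * stops once A1 exhausts the remaining lookahead. The reduced size is
 * returned, and the caller's next iteration builds a freelist for slab A
 * from the objects left at the front of the array.
 */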
6962 
6963 /*
6964  * Internal bulk free of objects that were not initialised by the post alloc
6965  * hooks and thus should not be processed by the free hooks
6966  */
6967 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
6968 {
6969 	if (!size)
6970 		return;
6971 
6972 	do {
6973 		struct detached_freelist df;
6974 
6975 		size = build_detached_freelist(s, size, p, &df);
6976 		if (!df.slab)
6977 			continue;
6978 
6979 		if (kfence_free(df.freelist))
6980 			continue;
6981 
6982 		__slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
6983 			     _RET_IP_);
6984 	} while (likely(size));
6985 }
6986 
6987 /* Note that interrupts must be enabled when calling this function. */
6988 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
6989 {
6990 	if (!size)
6991 		return;
6992 
6993 	/*
6994 	 * freeing to sheaves is incompatible with the detached freelist, so
6995 	 * once we go that way, we have to do everything differently
6996 	 */
6997 	if (s && cache_has_sheaves(s)) {
6998 		free_to_pcs_bulk(s, size, p);
6999 		return;
7000 	}
7001 
7002 	do {
7003 		struct detached_freelist df;
7004 
7005 		size = build_detached_freelist(s, size, p, &df);
7006 		if (!df.slab)
7007 			continue;
7008 
7009 		slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
7010 			       df.cnt, _RET_IP_);
7011 	} while (likely(size));
7012 }
7013 EXPORT_SYMBOL(kmem_cache_free_bulk);
7014 
7015 static unsigned int
7016 __refill_objects_node(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7017 		      unsigned int max, struct kmem_cache_node *n,
7018 		      bool allow_spin)
7019 {
7020 	struct partial_bulk_context pc;
7021 	struct slab *slab, *slab2;
7022 	unsigned int refilled = 0;
7023 	unsigned long flags;
7024 	void *object;
7025 
7026 	pc.flags = gfp;
7027 	pc.min_objects = min;
7028 	pc.max_objects = max;
7029 
7030 	if (!get_partial_node_bulk(s, n, &pc, allow_spin))
7031 		return 0;
7032 
7033 	list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7034 
7035 		list_del(&slab->slab_list);
7036 
7037 		object = get_freelist_nofreeze(s, slab);
7038 
7039 		while (object && refilled < max) {
7040 			p[refilled] = object;
7041 			object = get_freepointer(s, object);
7042 			maybe_wipe_obj_freeptr(s, p[refilled]);
7043 
7044 			refilled++;
7045 		}
7046 
7047 		/*
7048 		 * Freelist had more objects than we can accommodate, so we need to
7049 		 * free them back. We can treat it like a detached freelist, just
7050 		 * need to find the tail object.
7051 		 */
7052 		if (unlikely(object)) {
7053 			void *head = object;
7054 			void *tail;
7055 			int cnt = 0;
7056 
7057 			do {
7058 				tail = object;
7059 				cnt++;
7060 				object = get_freepointer(s, object);
7061 			} while (object);
7062 			__slab_free(s, slab, head, tail, cnt, _RET_IP_);
7063 		}
7064 
7065 		if (refilled >= max)
7066 			break;
7067 	}
7068 
7069 	if (unlikely(!list_empty(&pc.slabs))) {
7070 		spin_lock_irqsave(&n->list_lock, flags);
7071 
7072 		list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7073 
7074 			if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial))
7075 				continue;
7076 
7077 			list_del(&slab->slab_list);
7078 			add_partial(n, slab, ADD_TO_HEAD);
7079 		}
7080 
7081 		spin_unlock_irqrestore(&n->list_lock, flags);
7082 
7083 		/* any slabs left are completely free and can be discarded */
7084 		list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
7085 
7086 			list_del(&slab->slab_list);
7087 			discard_slab(s, slab);
7088 		}
7089 	}
7090 
7091 	return refilled;
7092 }
7093 
7094 #ifdef CONFIG_NUMA
7095 static unsigned int
7096 __refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7097 		     unsigned int max)
7098 {
7099 	struct zonelist *zonelist;
7100 	struct zoneref *z;
7101 	struct zone *zone;
7102 	enum zone_type highest_zoneidx = gfp_zone(gfp);
7103 	unsigned int cpuset_mems_cookie;
7104 	unsigned int refilled = 0;
7105 
7106 	/* see get_from_any_partial() for the defrag ratio description */
7107 	if (!s->remote_node_defrag_ratio ||
7108 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
7109 		return 0;
7110 
7111 	do {
7112 		cpuset_mems_cookie = read_mems_allowed_begin();
7113 		zonelist = node_zonelist(mempolicy_slab_node(), gfp);
7114 		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
7115 			struct kmem_cache_node *n;
7116 			unsigned int r;
7117 
7118 			n = get_node(s, zone_to_nid(zone));
7119 
7120 			if (!n || !cpuset_zone_allowed(zone, gfp) ||
7121 					n->nr_partial <= s->min_partial)
7122 				continue;
7123 
7124 			r = __refill_objects_node(s, p, gfp, min, max, n,
7125 						  /* allow_spin = */ false);
7126 			refilled += r;
7127 
7128 			if (r >= min) {
7129 				/*
7130 				 * Don't check read_mems_allowed_retry() here -
7131 				 * if mems_allowed was updated in parallel, that
7132 				 * was a harmless race between allocation and
7133 				 * the cpuset update
7134 				 */
7135 				return refilled;
7136 			}
7137 			p += r;
7138 			min -= r;
7139 			max -= r;
7140 		}
7141 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
7142 
7143 	return refilled;
7144 }
7145 #else
7146 static inline unsigned int
7147 __refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7148 		     unsigned int max)
7149 {
7150 	return 0;
7151 }
7152 #endif
7153 
7154 static unsigned int
7155 refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
7156 	       unsigned int max)
7157 {
7158 	int local_node = numa_mem_id();
7159 	unsigned int refilled;
7160 	struct slab *slab;
7161 
7162 	if (WARN_ON_ONCE(!gfpflags_allow_spinning(gfp)))
7163 		return 0;
7164 
7165 	refilled = __refill_objects_node(s, p, gfp, min, max,
7166 					 get_node(s, local_node),
7167 					 /* allow_spin = */ true);
7168 	if (refilled >= min)
7169 		return refilled;
7170 
7171 	refilled += __refill_objects_any(s, p + refilled, gfp, min - refilled,
7172 					 max - refilled);
7173 	if (refilled >= min)
7174 		return refilled;
7175 
7176 new_slab:
7177 
7178 	slab = new_slab(s, gfp, local_node);
7179 	if (!slab)
7180 		goto out;
7181 
7182 	stat(s, ALLOC_SLAB);
7183 
7184 	/*
7185 	 * TODO: possible optimization - if we know we will consume the whole
7186 	 * slab we might skip creating the freelist?
7187 	 */
7188 	refilled += alloc_from_new_slab(s, slab, p + refilled, max - refilled,
7189 					/* allow_spin = */ true);
7190 
7191 	if (refilled < min)
7192 		goto new_slab;
7193 
7194 out:
7195 	return refilled;
7196 }
7197 
7198 static inline
7199 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
7200 			    void **p)
7201 {
7202 	int i;
7203 
7204 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
7205 		for (i = 0; i < size; i++) {
7206 
7207 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_,
7208 					     s->object_size);
7209 			if (unlikely(!p[i]))
7210 				goto error;
7211 
7212 			maybe_wipe_obj_freeptr(s, p[i]);
7213 		}
7214 	} else {
7215 		i = refill_objects(s, p, flags, size, size);
7216 		if (i < size)
7217 			goto error;
7218 		stat_add(s, ALLOC_SLOWPATH, i);
7219 	}
7220 
7221 	return i;
7222 
7223 error:
7224 	__kmem_cache_free_bulk(s, i, p);
7225 	return 0;
7226 
7227 }
7228 
7229 /*
7230  * Note that interrupts must be enabled when calling this function and gfp
7231  * flags must allow spinning.
7232  */
7233 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
7234 				 void **p)
7235 {
7236 	unsigned int i = 0;
7237 	void *kfence_obj;
7238 
7239 	if (!size)
7240 		return 0;
7241 
7242 	s = slab_pre_alloc_hook(s, flags);
7243 	if (unlikely(!s))
7244 		return 0;
7245 
7246 	/*
7247 	 * to make things simpler, assume at most one kfence-allocated
7248 	 * object per bulk allocation and choose its index randomly
7249 	 */
7250 	kfence_obj = kfence_alloc(s, s->object_size, flags);
7251 
7252 	if (unlikely(kfence_obj)) {
7253 		if (unlikely(size == 1)) {
7254 			p[0] = kfence_obj;
7255 			goto out;
7256 		}
7257 		size--;
7258 	}
7259 
7260 	i = alloc_from_pcs_bulk(s, flags, size, p);
7261 
7262 	if (i < size) {
7263 		/*
7264 		 * If we ran out of memory, don't bother with freeing back to
7265 		 * the percpu sheaves, we have bigger problems.
7266 		 */
7267 		if (unlikely(__kmem_cache_alloc_bulk(s, flags, size - i, p + i) == 0)) {
7268 			if (i > 0)
7269 				__kmem_cache_free_bulk(s, i, p);
7270 			if (kfence_obj)
7271 				__kfence_free(kfence_obj);
7272 			return 0;
7273 		}
7274 	}
7275 
7276 	if (unlikely(kfence_obj)) {
7277 		int idx = get_random_u32_below(size + 1);
7278 
7279 		if (idx != size)
7280 			p[size] = p[idx];
7281 		p[idx] = kfence_obj;
7282 
7283 		size++;
7284 	}
7285 
7286 out:
7287 	/*
7288 	 * memcg and kmem_cache debug support and memory initialization.
7289 	 * Done outside of the IRQ disabled fastpath loop.
7290 	 */
7291 	if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
7292 		    slab_want_init_on_alloc(flags, s), s->object_size))) {
7293 		return 0;
7294 	}
7295 
7296 	return size;
7297 }
7298 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
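
/*
 * Illustrative usage sketch (cache and array size are hypothetical): the
 * bulk API either fills the whole array or returns 0, so callers never
 * need to clean up a partial result themselves:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
 */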
7299 
7300 /*
7301  * Object placement in a slab is made very easy because we always start at
7302  * offset 0. If we tune the size of the object to the alignment then we can
7303  * get the required alignment by putting one properly sized object after
7304  * another.
7305  *
7306  * Notice that the allocation order determines the sizes of the per cpu
7307  * caches. Each processor always has one slab available for allocations.
7308  * Increasing the allocation order reduces the number of times that slabs
7309  * must be moved on and off the partial lists and is therefore a factor in
7310  * locking overhead.
7311  */
7312 
7313 /*
7314  * Minimum / Maximum order of slab pages. This influences locking overhead
7315  * and slab fragmentation. A higher order reduces the number of partial slabs
7316  * and increases the number of allocations possible without having to
7317  * take the list_lock.
7318  */
7319 static unsigned int slub_min_order;
7320 static unsigned int slub_max_order =
7321 	IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
7322 static unsigned int slub_min_objects;
7323 
7324 /*
7325  * Calculate the order of allocation given an slab object size.
7326  *
7327  * The order of allocation has significant impact on performance and other
7328  * system components. Generally order 0 allocations should be preferred since
7329  * order 0 does not cause fragmentation in the page allocator. Larger objects
7330  * be problematic to put into order 0 slabs because there may be too much
7331  * unused space left. We go to a higher order if more than 1/16th of the slab
7332  * would be wasted.
7333  *
7334  * In order to reach satisfactory performance we must ensure that a minimum
7335  * number of objects is in one slab. Otherwise we may generate too much
7336  * activity on the partial lists which requires taking the list_lock. This is
7337  * less a concern for large slabs though which are rarely used.
7338  *
7339  * slab_max_order specifies the order where we begin to stop considering the
7340  * number of objects in a slab as critical. If we reach slab_max_order then
7341  * we try to keep the page order as low as possible. So we accept more waste
7342  * of space in favor of a small page order.
7343  *
7344  * Higher order allocations also allow the placement of more objects in a
7345  * slab and thereby reduce object handling overhead. If the user has
7346  * requested a higher minimum order then we start with that one instead of
7347  * the smallest order which will fit the object.
7348  */
7349 static inline unsigned int calc_slab_order(unsigned int size,
7350 		unsigned int min_order, unsigned int max_order,
7351 		unsigned int fract_leftover)
7352 {
7353 	unsigned int order;
7354 
7355 	for (order = min_order; order <= max_order; order++) {
7356 
7357 		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
7358 		unsigned int rem;
7359 
7360 		rem = slab_size % size;
7361 
7362 		if (rem <= slab_size / fract_leftover)
7363 			break;
7364 	}
7365 
7366 	return order;
7367 }
7368 
7369 static inline int calculate_order(unsigned int size)
7370 {
7371 	unsigned int order;
7372 	unsigned int min_objects;
7373 	unsigned int max_objects;
7374 	unsigned int min_order;
7375 
7376 	min_objects = slub_min_objects;
7377 	if (!min_objects) {
7378 		/*
7379 		 * Some architectures will only update present cpus when
7380 		 * onlining them, so don't trust the number if it's just 1. But
7381 		 * we also don't want to use nr_cpu_ids always, as on some other
7382 		 * architectures, there can be many possible cpus, but never
7383 		 * onlined. Here we compromise between trying to avoid too high
7384 		 * order on systems that appear larger than they are, and too
7385 		 * low order on systems that appear smaller than they are.
7386 		 */
7387 		unsigned int nr_cpus = num_present_cpus();
7388 		if (nr_cpus <= 1)
7389 			nr_cpus = nr_cpu_ids;
7390 		min_objects = 4 * (fls(nr_cpus) + 1);
7391 	}
7392 	/* min_objects can't be 0 because get_order(0) is undefined */
7393 	max_objects = max(order_objects(slub_max_order, size), 1U);
7394 	min_objects = min(min_objects, max_objects);
7395 
7396 	min_order = max_t(unsigned int, slub_min_order,
7397 			  get_order(min_objects * size));
7398 	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
7399 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
7400 
7401 	/*
7402 	 * Attempt to find best configuration for a slab. This works by first
7403 	 * attempting to generate a layout with the best possible configuration
7404 	 * and backing off gradually.
7405 	 *
7406 	 * We start with accepting at most 1/16 waste and try to find the
7407 	 * smallest order from min_objects-derived/slab_min_order up to
7408 	 * slab_max_order that will satisfy the constraint. Note that increasing
7409 	 * the order can only result in same or less fractional waste, not more.
7410 	 *
7411 	 * If that fails, we increase the acceptable fraction of waste and try
7412 	 * again. The last iteration with fraction of 1/2 would effectively
7413 	 * accept any waste and give us the order determined by min_objects, as
7414 	 * long as at least a single object fits within slab_max_order.
7415 	 */
7416 	for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
7417 		order = calc_slab_order(size, min_order, slub_max_order,
7418 					fraction);
7419 		if (order <= slub_max_order)
7420 			return order;
7421 	}
7422 
7423 	/*
7424 	 * Doh this slab cannot be placed using slab_max_order.
7425 	 */
7426 	order = get_order(size);
7427 	if (order <= MAX_PAGE_ORDER)
7428 		return order;
7429 	return -ENOSYS;
7430 }
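
/*
 * Worked example of the waste heuristic (illustrative, assuming 4K pages,
 * slab_min_order 0 and a min_objects value that permits order 0): for a
 * 700 byte object an order-0 slab holds 5 objects and wastes 596 bytes,
 * more than 4096/16, so calc_slab_order() moves on; an order-1 slab holds
 * 11 objects and wastes 492 bytes, below 8192/16, so order 1 is accepted
 * already at the strictest 1/16 fraction.
 */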
7431 
7432 static void
7433 init_kmem_cache_node(struct kmem_cache_node *n, struct node_barn *barn)
7434 {
7435 	n->nr_partial = 0;
7436 	spin_lock_init(&n->list_lock);
7437 	INIT_LIST_HEAD(&n->partial);
7438 #ifdef CONFIG_SLUB_DEBUG
7439 	atomic_long_set(&n->nr_slabs, 0);
7440 	atomic_long_set(&n->total_objects, 0);
7441 	INIT_LIST_HEAD(&n->full);
7442 #endif
7443 	n->barn = barn;
7444 	if (barn)
7445 		barn_init(barn);
7446 }
7447 
7448 #ifdef CONFIG_SLUB_STATS
7449 static inline int alloc_kmem_cache_stats(struct kmem_cache *s)
7450 {
7451 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
7452 			NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
7453 			sizeof(struct kmem_cache_stats));
7454 
7455 	s->cpu_stats = alloc_percpu(struct kmem_cache_stats);
7456 
7457 	if (!s->cpu_stats)
7458 		return 0;
7459 
7460 	return 1;
7461 }
7462 #endif
7463 
7464 static int init_percpu_sheaves(struct kmem_cache *s)
7465 {
7466 	static struct slab_sheaf bootstrap_sheaf = {};
7467 	int cpu;
7468 
7469 	for_each_possible_cpu(cpu) {
7470 		struct slub_percpu_sheaves *pcs;
7471 
7472 		pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
7473 
7474 		local_trylock_init(&pcs->lock);
7475 
7476 		/*
7477 		 * Bootstrap sheaf has zero size so fast-path allocation fails.
7478 		 * It also has size == s->sheaf_capacity, so fast-path free
7479 		 * fails. In the slow paths we recognize the situation by
7480 		 * checking s->sheaf_capacity. This allows fast paths to assume
7481 		 * s->cpu_sheaves and pcs->main always exist and are valid.
7482 		 * It's also safe to share the single static bootstrap_sheaf
7483 		 * with zero-sized objects array as it's never modified.
7484 		 *
7485 		 * Bootstrap_sheaf also has NULL pointer to kmem_cache so we
7486 		 * recognize it and not attempt to free it when destroying the
7487 		 * cache.
7488 		 *
7489 		 * We keep bootstrap_sheaf for kmem_cache and kmem_cache_node,
7490 		 * caches with debug enabled, and all caches with SLUB_TINY.
7491 		 * For kmalloc caches it's used temporarily during the initial
7492 		 * bootstrap.
7493 		 */
7494 		if (!s->sheaf_capacity)
7495 			pcs->main = &bootstrap_sheaf;
7496 		else
7497 			pcs->main = alloc_empty_sheaf(s, GFP_KERNEL);
7498 
7499 		if (!pcs->main)
7500 			return -ENOMEM;
7501 	}
7502 
7503 	return 0;
7504 }
7505 
7506 static struct kmem_cache *kmem_cache_node;
7507 
7508 /*
7509  * No kmalloc_node yet so do it by hand. We know that this is the first
7510  * slab on the node for this slabcache. There are no concurrent accesses
7511  * possible.
7512  *
7513  * Note that this function only works on the kmem_cache_node
7514  * when allocating for the kmem_cache_node. This is used for bootstrapping
7515  * memory on a fresh node that has no slab structures yet.
7516  */
7517 static void early_kmem_cache_node_alloc(int node)
7518 {
7519 	struct slab *slab;
7520 	struct kmem_cache_node *n;
7521 
7522 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
7523 
7524 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
7525 
7526 	BUG_ON(!slab);
7527 	if (slab_nid(slab) != node) {
7528 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
7529 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
7530 	}
7531 
7532 	n = slab->freelist;
7533 	BUG_ON(!n);
7534 #ifdef CONFIG_SLUB_DEBUG
7535 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
7536 #endif
7537 	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
7538 	slab->freelist = get_freepointer(kmem_cache_node, n);
7539 	slab->inuse = 1;
7540 	kmem_cache_node->node[node] = n;
7541 	init_kmem_cache_node(n, NULL);
7542 	inc_slabs_node(kmem_cache_node, node, slab->objects);
7543 
7544 	/*
7545 	 * No locks need to be taken here as it has just been
7546 	 * initialized and there is no concurrent access.
7547 	 */
7548 	__add_partial(n, slab, ADD_TO_HEAD);
7549 }
7550 
7551 static void free_kmem_cache_nodes(struct kmem_cache *s)
7552 {
7553 	int node;
7554 	struct kmem_cache_node *n;
7555 
7556 	for_each_kmem_cache_node(s, node, n) {
7557 		if (n->barn) {
7558 			WARN_ON(n->barn->nr_full);
7559 			WARN_ON(n->barn->nr_empty);
7560 			kfree(n->barn);
7561 			n->barn = NULL;
7562 		}
7563 
7564 		s->node[node] = NULL;
7565 		kmem_cache_free(kmem_cache_node, n);
7566 	}
7567 }
7568 
7569 void __kmem_cache_release(struct kmem_cache *s)
7570 {
7571 	cache_random_seq_destroy(s);
7572 	pcs_destroy(s);
7573 #ifdef CONFIG_SLUB_STATS
7574 	free_percpu(s->cpu_stats);
7575 #endif
7576 	free_kmem_cache_nodes(s);
7577 }
7578 
7579 static int init_kmem_cache_nodes(struct kmem_cache *s)
7580 {
7581 	int node;
7582 
7583 	for_each_node_mask(node, slab_nodes) {
7584 		struct kmem_cache_node *n;
7585 		struct node_barn *barn = NULL;
7586 
7587 		if (slab_state == DOWN) {
7588 			early_kmem_cache_node_alloc(node);
7589 			continue;
7590 		}
7591 
7592 		if (cache_has_sheaves(s)) {
7593 			barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
7594 
7595 			if (!barn)
7596 				return 0;
7597 		}
7598 
7599 		n = kmem_cache_alloc_node(kmem_cache_node,
7600 						GFP_KERNEL, node);
7601 		if (!n) {
7602 			kfree(barn);
7603 			return 0;
7604 		}
7605 
7606 		init_kmem_cache_node(n, barn);
7607 
7608 		s->node[node] = n;
7609 	}
7610 	return 1;
7611 }
7612 
7613 static unsigned int calculate_sheaf_capacity(struct kmem_cache *s,
7614 					     struct kmem_cache_args *args)
7615 
7616 {
7617 	unsigned int capacity;
7618 	size_t size;
7619 
7620 
7621 	if (IS_ENABLED(CONFIG_SLUB_TINY) || s->flags & SLAB_DEBUG_FLAGS)
7622 		return 0;
7623 
7624 	/*
7625 	 * Bootstrap caches can't have sheaves for now (SLAB_NO_OBJ_EXT).
7626 	 * SLAB_NOLEAKTRACE caches (e.g., kmemleak's object_cache) must not
7627 	 * have sheaves to avoid recursion when sheaf allocation triggers
7628 	 * kmemleak tracking.
7629 	 */
7630 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
7631 		return 0;
7632 
7633 	/*
7634 	 * For now we use a roughly similar formula (divided by two as there are
7635 	 * two percpu sheaves) as what was used for percpu partial slabs, which
7636 	 * should result in similar lock contention (barn or list_lock)
7637 	 */
7638 	if (s->size >= PAGE_SIZE)
7639 		capacity = 4;
7640 	else if (s->size >= 1024)
7641 		capacity = 12;
7642 	else if (s->size >= 256)
7643 		capacity = 26;
7644 	else
7645 		capacity = 60;
7646 
7647 	/* Increment capacity to make sheaf exactly a kmalloc size bucket */
7648 	size = struct_size_t(struct slab_sheaf, objects, capacity);
7649 	size = kmalloc_size_roundup(size);
7650 	capacity = (size - struct_size_t(struct slab_sheaf, objects, 0)) / sizeof(void *);
7651 
7652 	/*
7653 	 * Respect an explicit request for a larger capacity, typically motivated
7654 	 * by the expected maximum size of kmem_cache_prefill_sheaf() requests, so
7655 	 * they don't end up using low-performance oversize sheaves.
7656 	 */
7657 	return max(capacity, args->sheaf_capacity);
7658 }
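
/*
 * Worked example of the rounding above (illustrative, 64-bit pointers):
 * a 512 byte object starts from the base capacity of 26; the sheaf header
 * plus 26 pointers is then rounded up to the next kmalloc bucket by
 * kmalloc_size_roundup(), and the capacity is recomputed from the rounded
 * size so the extra room in that bucket holds object pointers rather than
 * being wasted as padding.
 */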
7659 
7660 /*
7661  * calculate_sizes() determines the order and the distribution of data within
7662  * a slab object.
7663  */
7664 static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
7665 {
7666 	slab_flags_t flags = s->flags;
7667 	unsigned int size = s->object_size;
7668 	unsigned int aligned_size;
7669 	unsigned int order;
7670 
7671 	/*
7672 	 * Round up object size to the next word boundary. We can only
7673 	 * place the free pointer at word boundaries and this determines
7674 	 * the possible location of the free pointer.
7675 	 */
7676 	size = ALIGN(size, sizeof(void *));
7677 
7678 #ifdef CONFIG_SLUB_DEBUG
7679 	/*
7680 	 * Determine if we can poison the object itself. If the user of
7681 	 * the slab may touch the object after free or before allocation
7682 	 * then we should never poison the object itself.
7683 	 */
7684 	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
7685 			!s->ctor)
7686 		s->flags |= __OBJECT_POISON;
7687 	else
7688 		s->flags &= ~__OBJECT_POISON;
7689 
7690 
7691 	/*
7692 	 * If we are Redzoning and there is no space between the end of the
7693 	 * object and the following fields, add one word so the right Redzone
7694 	 * is non-empty.
7695 	 */
7696 	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
7697 		size += sizeof(void *);
7698 #endif
7699 
7700 	/*
7701 	 * With that we have determined the number of bytes in actual use
7702 	 * by the object and redzoning.
7703 	 */
7704 	s->inuse = size;
7705 
7706 	if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
7707 	    (flags & SLAB_POISON) ||
7708 	    (s->ctor && !args->use_freeptr_offset) ||
7709 	    ((flags & SLAB_RED_ZONE) &&
7710 	     (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
7711 		/*
7712 		 * Relocate free pointer after the object if it is not
7713 		 * permitted to overwrite the first word of the object on
7714 		 * kmem_cache_free.
7715 		 *
7716 		 * This is the case if we do RCU, have a constructor, are
7717 		 * poisoning the objects, or are redzoning an object smaller
7718 		 * than sizeof(void *) or are redzoning an object with
7719 		 * slub_debug_orig_size() enabled, in which case the right
7720 		 * redzone may be extended.
7721 		 *
7722 		 * The assumption that s->offset >= s->inuse means free
7723 		 * pointer is outside of the object is used in the
7724 		 * freeptr_outside_object() function. If that is no
7725 		 * longer true, the function needs to be modified.
7726 		 */
7727 		s->offset = size;
7728 		size += sizeof(void *);
7729 	} else if (((flags & SLAB_TYPESAFE_BY_RCU) || s->ctor) &&
7730 			args->use_freeptr_offset) {
7731 		s->offset = args->freeptr_offset;
7732 	} else {
7733 		/*
7734 		 * Store freelist pointer near middle of object to keep
7735 		 * it away from the edges of the object to avoid small
7736 		 * sized over/underflows from neighboring allocations.
7737 		 */
7738 		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
7739 	}
7740 
7741 #ifdef CONFIG_SLUB_DEBUG
7742 	if (flags & SLAB_STORE_USER) {
7743 		/*
7744 		 * Need to store information about allocs and frees after
7745 		 * the object.
7746 		 */
7747 		size += 2 * sizeof(struct track);
7748 
7749 		/* Save the original kmalloc request size */
7750 		if (flags & SLAB_KMALLOC)
7751 			size += sizeof(unsigned long);
7752 	}
7753 #endif
7754 
7755 	kasan_cache_create(s, &size, &s->flags);
7756 #ifdef CONFIG_SLUB_DEBUG
7757 	if (flags & SLAB_RED_ZONE) {
7758 		/*
7759 		 * Add some empty padding so that we can catch
7760 		 * overwrites from earlier objects rather than let
7761 		 * tracking information or the free pointer be
7762 		 * corrupted if a user writes before the start
7763 		 * of the object.
7764 		 */
7765 		size += sizeof(void *);
7766 
7767 		s->red_left_pad = sizeof(void *);
7768 		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
7769 		size += s->red_left_pad;
7770 	}
7771 #endif
7772 
7773 	/*
7774 	 * SLUB stores one object immediately after another beginning from
7775 	 * offset 0. In order to align the objects we have to simply size
7776 	 * each object to conform to the alignment.
7777 	 */
7778 	aligned_size = ALIGN(size, s->align);
7779 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
7780 	if (slab_args_unmergeable(args, s->flags) &&
7781 			(aligned_size - size >= sizeof(struct slabobj_ext)))
7782 		s->flags |= SLAB_OBJ_EXT_IN_OBJ;
7783 #endif
7784 	size = aligned_size;
7785 
7786 	s->size = size;
7787 	s->reciprocal_size = reciprocal_value(size);
7788 	order = calculate_order(size);
7789 
7790 	if ((int)order < 0)
7791 		return 0;
7792 
7793 	s->allocflags = __GFP_COMP;
7794 
7795 	if (s->flags & SLAB_CACHE_DMA)
7796 		s->allocflags |= GFP_DMA;
7797 
7798 	if (s->flags & SLAB_CACHE_DMA32)
7799 		s->allocflags |= GFP_DMA32;
7800 
7801 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
7802 		s->allocflags |= __GFP_RECLAIMABLE;
7803 
7804 	/*
7805 	 * For KMALLOC_NORMAL caches we enable sheaves later by
7806 	 * bootstrap_kmalloc_sheaves() to avoid recursion
7807 	 */
7808 	if (!is_kmalloc_normal(s))
7809 		s->sheaf_capacity = calculate_sheaf_capacity(s, args);
7810 
7811 	/*
7812 	 * Determine the number of objects per slab
7813 	 */
7814 	s->oo = oo_make(order, size);
7815 	s->min = oo_make(get_order(size), size);
7816 
7817 	return !!oo_objects(s->oo);
7818 }
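
/*
 * Worked example of the free pointer placement above (illustrative,
 * assuming 8 byte words): for a 24 byte object without a constructor, RCU
 * or poisoning, the free pointer lives inside the object at offset
 * ALIGN_DOWN(24 / 2, 8) == 8, away from both edges. If any of those
 * features is enabled, it is relocated to offset 24 instead and s->size
 * grows by one word.
 */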
7819 
7820 static void list_slab_objects(struct kmem_cache *s, struct slab *slab)
7821 {
7822 #ifdef CONFIG_SLUB_DEBUG
7823 	void *addr = slab_address(slab);
7824 	void *p;
7825 
7826 	if (!slab_add_kunit_errors())
7827 		slab_bug(s, "Objects remaining on __kmem_cache_shutdown()");
7828 
7829 	spin_lock(&object_map_lock);
7830 	__fill_map(object_map, s, slab);
7831 
7832 	for_each_object(p, s, addr, slab->objects) {
7833 
7834 		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
7835 			if (slab_add_kunit_errors())
7836 				continue;
7837 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
7838 			print_tracking(s, p);
7839 		}
7840 	}
7841 	spin_unlock(&object_map_lock);
7842 
7843 	__slab_err(slab);
7844 #endif
7845 }
7846 
7847 /*
7848  * Attempt to free all partial slabs on a node.
7849  * This is called from __kmem_cache_shutdown(). We must take list_lock
7850  * because a sysfs file might still access the partial list during shutdown.
7851  */
7852 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
7853 {
7854 	LIST_HEAD(discard);
7855 	struct slab *slab, *h;
7856 
7857 	BUG_ON(irqs_disabled());
7858 	spin_lock_irq(&n->list_lock);
7859 	list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
7860 		if (!slab->inuse) {
7861 			remove_partial(n, slab);
7862 			list_add(&slab->slab_list, &discard);
7863 		} else {
7864 			list_slab_objects(s, slab);
7865 		}
7866 	}
7867 	spin_unlock_irq(&n->list_lock);
7868 
7869 	list_for_each_entry_safe(slab, h, &discard, slab_list)
7870 		discard_slab(s, slab);
7871 }
7872 
7873 bool __kmem_cache_empty(struct kmem_cache *s)
7874 {
7875 	int node;
7876 	struct kmem_cache_node *n;
7877 
7878 	for_each_kmem_cache_node(s, node, n)
7879 		if (n->nr_partial || node_nr_slabs(n))
7880 			return false;
7881 	return true;
7882 }
7883 
7884 /*
7885  * Release all resources used by a slab cache.
7886  */
7887 int __kmem_cache_shutdown(struct kmem_cache *s)
7888 {
7889 	int node;
7890 	struct kmem_cache_node *n;
7891 
7892 	flush_all_cpus_locked(s);
7893 
7894 	/* we might have rcu sheaves in flight */
7895 	if (cache_has_sheaves(s))
7896 		rcu_barrier();
7897 
7898 	/* Attempt to free all objects */
7899 	for_each_kmem_cache_node(s, node, n) {
7900 		if (n->barn)
7901 			barn_shrink(s, n->barn);
7902 		free_partial(s, n);
7903 		if (n->nr_partial || node_nr_slabs(n))
7904 			return 1;
7905 	}
7906 	return 0;
7907 }
7908 
7909 #ifdef CONFIG_PRINTK
7910 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
7911 {
7912 	void *base;
7913 	int __maybe_unused i;
7914 	unsigned int objnr;
7915 	void *objp;
7916 	void *objp0;
7917 	struct kmem_cache *s = slab->slab_cache;
7918 	struct track __maybe_unused *trackp;
7919 
7920 	kpp->kp_ptr = object;
7921 	kpp->kp_slab = slab;
7922 	kpp->kp_slab_cache = s;
7923 	base = slab_address(slab);
7924 	objp0 = kasan_reset_tag(object);
7925 #ifdef CONFIG_SLUB_DEBUG
7926 	objp = restore_red_left(s, objp0);
7927 #else
7928 	objp = objp0;
7929 #endif
7930 	objnr = obj_to_index(s, slab, objp);
7931 	kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
7932 	objp = base + s->size * objnr;
7933 	kpp->kp_objp = objp;
7934 	if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
7935 			 || (objp - base) % s->size) ||
7936 	    !(s->flags & SLAB_STORE_USER))
7937 		return;
7938 #ifdef CONFIG_SLUB_DEBUG
7939 	objp = fixup_red_left(s, objp);
7940 	trackp = get_track(s, objp, TRACK_ALLOC);
7941 	kpp->kp_ret = (void *)trackp->addr;
7942 #ifdef CONFIG_STACKDEPOT
7943 	{
7944 		depot_stack_handle_t handle;
7945 		unsigned long *entries;
7946 		unsigned int nr_entries;
7947 
7948 		handle = READ_ONCE(trackp->handle);
7949 		if (handle) {
7950 			nr_entries = stack_depot_fetch(handle, &entries);
7951 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
7952 				kpp->kp_stack[i] = (void *)entries[i];
7953 		}
7954 
7955 		trackp = get_track(s, objp, TRACK_FREE);
7956 		handle = READ_ONCE(trackp->handle);
7957 		if (handle) {
7958 			nr_entries = stack_depot_fetch(handle, &entries);
7959 			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
7960 				kpp->kp_free_stack[i] = (void *)entries[i];
7961 		}
7962 	}
7963 #endif
7964 #endif
7965 }
7966 #endif
7967 
7968 /********************************************************************
7969  *		Kmalloc subsystem
7970  *******************************************************************/
7971 
7972 static int __init setup_slub_min_order(const char *str, const struct kernel_param *kp)
7973 {
7974 	int ret;
7975 
7976 	ret = kstrtouint(str, 0, &slub_min_order);
7977 	if (ret)
7978 		return ret;
7979 
7980 	if (slub_min_order > slub_max_order)
7981 		slub_max_order = slub_min_order;
7982 
7983 	return 0;
7984 }
7985 
7986 static const struct kernel_param_ops param_ops_slab_min_order __initconst = {
7987 	.set = setup_slub_min_order,
7988 };
7989 __core_param_cb(slab_min_order, &param_ops_slab_min_order, &slub_min_order, 0);
7990 __core_param_cb(slub_min_order, &param_ops_slab_min_order, &slub_min_order, 0);
7991 
7992 static int __init setup_slub_max_order(const char *str, const struct kernel_param *kp)
7993 {
7994 	int ret;
7995 
7996 	ret = kstrtouint(str, 0, &slub_max_order);
7997 	if (ret)
7998 		return ret;
7999 
8000 	slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
8001 
8002 	if (slub_min_order > slub_max_order)
8003 		slub_min_order = slub_max_order;
8004 
8005 	return 0;
8006 }
8007 
8008 static const struct kernel_param_ops param_ops_slab_max_order __initconst = {
8009 	.set = setup_slub_max_order,
8010 };
8011 __core_param_cb(slab_max_order, &param_ops_slab_max_order, &slub_max_order, 0);
8012 __core_param_cb(slub_max_order, &param_ops_slab_max_order, &slub_max_order, 0);
8013 
8014 core_param(slab_min_objects, slub_min_objects, uint, 0);
8015 core_param(slub_min_objects, slub_min_objects, uint, 0);
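
/*
 * Illustrative boot command line use of the parameters above (values are
 * hypothetical); the legacy slub_* spellings are accepted as aliases:
 *
 *	slab_min_order=1 slab_max_order=3 slab_min_objects=16
 */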
8016 
8017 #ifdef CONFIG_NUMA
8018 static int __init setup_slab_strict_numa(const char *str, const struct kernel_param *kp)
8019 {
8020 	if (nr_node_ids > 1) {
8021 		static_branch_enable(&strict_numa);
8022 		pr_info("SLUB: Strict NUMA enabled.\n");
8023 	} else {
8024 		pr_warn("slab_strict_numa parameter set on non NUMA system.\n");
8025 	}
8026 
8027 	return 0;
8028 }
8029 
8030 static const struct kernel_param_ops param_ops_slab_strict_numa __initconst = {
8031 	.flags = KERNEL_PARAM_OPS_FL_NOARG,
8032 	.set = setup_slab_strict_numa,
8033 };
8034 __core_param_cb(slab_strict_numa, &param_ops_slab_strict_numa, NULL, 0);
8035 #endif
8036 
8037 
8038 #ifdef CONFIG_HARDENED_USERCOPY
8039 /*
8040  * Rejects incorrectly sized objects and objects that are to be copied
8041  * to/from userspace but do not fall entirely within the containing slab
8042  * cache's usercopy region.
8043  *
8044  * Returns normally if the check passes; otherwise the copy is rejected
8045  * via usercopy_abort(), which reports the offending cache.
8046  */
8047 void __check_heap_object(const void *ptr, unsigned long n,
8048 			 const struct slab *slab, bool to_user)
8049 {
8050 	struct kmem_cache *s;
8051 	unsigned int offset;
8052 	bool is_kfence = is_kfence_address(ptr);
8053 
8054 	ptr = kasan_reset_tag(ptr);
8055 
8056 	/* Find object and usable object size. */
8057 	s = slab->slab_cache;
8058 
8059 	/* Reject impossible pointers. */
8060 	if (ptr < slab_address(slab))
8061 		usercopy_abort("SLUB object not in SLUB page?!", NULL,
8062 			       to_user, 0, n);
8063 
8064 	/* Find offset within object. */
8065 	if (is_kfence)
8066 		offset = ptr - kfence_object_start(ptr);
8067 	else
8068 		offset = (ptr - slab_address(slab)) % s->size;
8069 
8070 	/* Adjust for redzone and reject if within the redzone. */
8071 	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
8072 		if (offset < s->red_left_pad)
8073 			usercopy_abort("SLUB object in left red zone",
8074 				       s->name, to_user, offset, n);
8075 		offset -= s->red_left_pad;
8076 	}
8077 
8078 	/* Allow address range falling entirely within usercopy region. */
8079 	if (offset >= s->useroffset &&
8080 	    offset - s->useroffset <= s->usersize &&
8081 	    n <= s->useroffset - offset + s->usersize)
8082 		return;
8083 
8084 	usercopy_abort("SLUB object", s->name, to_user, offset, n);
8085 }
8086 #endif /* CONFIG_HARDENED_USERCOPY */
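
/*
 * Illustrative sketch (struct, field and cache name are hypothetical): the
 * usercopy region checked above is declared at cache creation time, so only
 * that window of each object may be copied to or from userspace:
 *
 *	foo_cache = kmem_cache_create_usercopy("foo", sizeof(struct foo), 0,
 *				SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, ubuf),
 *				sizeof_field(struct foo, ubuf), NULL);
 */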
8087 
8088 #define SHRINK_PROMOTE_MAX 32
8089 
8090 /*
8091  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
8092  * up most to the head of the partial lists. New allocations will then
8093  * fill those up and thus they can be removed from the partial lists.
8094  *
8095  * The slabs with the least items are placed last. This results in them
8096  * being allocated from last increasing the chance that the last objects
8097  * are freed in them.
8098  */
8099 static int __kmem_cache_do_shrink(struct kmem_cache *s)
8100 {
8101 	int node;
8102 	int i;
8103 	struct kmem_cache_node *n;
8104 	struct slab *slab;
8105 	struct slab *t;
8106 	struct list_head discard;
8107 	struct list_head promote[SHRINK_PROMOTE_MAX];
8108 	unsigned long flags;
8109 	int ret = 0;
8110 
8111 	for_each_kmem_cache_node(s, node, n) {
8112 		INIT_LIST_HEAD(&discard);
8113 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
8114 			INIT_LIST_HEAD(promote + i);
8115 
8116 		if (n->barn)
8117 			barn_shrink(s, n->barn);
8118 
8119 		spin_lock_irqsave(&n->list_lock, flags);
8120 
8121 		/*
8122 		 * Build lists of slabs to discard or promote.
8123 		 *
8124 		 * Note that concurrent frees may occur while we hold the
8125 		 * list_lock. slab->inuse here is the upper limit.
8126 		 */
8127 		list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
8128 			int free = slab->objects - slab->inuse;
8129 
8130 			/* Do not reread slab->inuse */
8131 			barrier();
8132 
8133 			/* We do not keep full slabs on the list */
8134 			BUG_ON(free <= 0);
8135 
8136 			if (free == slab->objects) {
8137 				list_move(&slab->slab_list, &discard);
8138 				slab_clear_node_partial(slab);
8139 				n->nr_partial--;
8140 				dec_slabs_node(s, node, slab->objects);
8141 			} else if (free <= SHRINK_PROMOTE_MAX)
8142 				list_move(&slab->slab_list, promote + free - 1);
8143 		}
8144 
8145 		/*
8146 		 * Promote the slabs filled up most to the head of the
8147 		 * partial list.
8148 		 */
8149 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
8150 			list_splice(promote + i, &n->partial);
8151 
8152 		spin_unlock_irqrestore(&n->list_lock, flags);
8153 
8154 		/* Release empty slabs */
8155 		list_for_each_entry_safe(slab, t, &discard, slab_list)
8156 			free_slab(s, slab);
8157 
8158 		if (node_nr_slabs(n))
8159 			ret = 1;
8160 	}
8161 
8162 	return ret;
8163 }
8164 
8165 int __kmem_cache_shrink(struct kmem_cache *s)
8166 {
8167 	flush_all(s);
8168 	return __kmem_cache_do_shrink(s);
8169 }
8170 
8171 static int slab_mem_going_offline_callback(void)
8172 {
8173 	struct kmem_cache *s;
8174 
8175 	mutex_lock(&slab_mutex);
8176 	list_for_each_entry(s, &slab_caches, list) {
8177 		flush_all_cpus_locked(s);
8178 		__kmem_cache_do_shrink(s);
8179 	}
8180 	mutex_unlock(&slab_mutex);
8181 
8182 	return 0;
8183 }
8184 
8185 static int slab_mem_going_online_callback(int nid)
8186 {
8187 	struct kmem_cache_node *n;
8188 	struct kmem_cache *s;
8189 	int ret = 0;
8190 
8191 	/*
8192 	 * We are bringing a node online. No memory is available yet. We must
8193 	 * allocate a kmem_cache_node structure in order to bring the node
8194 	 * online.
8195 	 */
8196 	mutex_lock(&slab_mutex);
8197 	list_for_each_entry(s, &slab_caches, list) {
8198 		struct node_barn *barn = NULL;
8199 
8200 		/*
8201 		 * The structure may already exist if the node was previously
8202 		 * onlined and offlined.
8203 		 */
8204 		if (get_node(s, nid))
8205 			continue;
8206 
8207 		if (cache_has_sheaves(s)) {
8208 			barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, nid);
8209 
8210 			if (!barn) {
8211 				ret = -ENOMEM;
8212 				goto out;
8213 			}
8214 		}
8215 
8216 		/*
8217 		 * XXX: kmem_cache_alloc_node will fallback to other nodes
8218 		 *      since memory is not yet available from the node that
8219 		 *      is brought up.
8220 		 */
8221 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
8222 		if (!n) {
8223 			kfree(barn);
8224 			ret = -ENOMEM;
8225 			goto out;
8226 		}
8227 
8228 		init_kmem_cache_node(n, barn);
8229 
8230 		s->node[nid] = n;
8231 	}
8232 	/*
8233 	 * Any cache created after this point will also have kmem_cache_node
8234 	 * initialized for the new node.
8235 	 */
8236 	node_set(nid, slab_nodes);
8237 out:
8238 	mutex_unlock(&slab_mutex);
8239 	return ret;
8240 }
8241 
8242 static int slab_memory_callback(struct notifier_block *self,
8243 				unsigned long action, void *arg)
8244 {
8245 	struct node_notify *nn = arg;
8246 	int nid = nn->nid;
8247 	int ret = 0;
8248 
8249 	switch (action) {
8250 	case NODE_ADDING_FIRST_MEMORY:
8251 		ret = slab_mem_going_online_callback(nid);
8252 		break;
8253 	case NODE_REMOVING_LAST_MEMORY:
8254 		ret = slab_mem_going_offline_callback();
8255 		break;
8256 	}
8257 	if (ret)
8258 		ret = notifier_from_errno(ret);
8259 	else
8260 		ret = NOTIFY_OK;
8261 	return ret;
8262 }
8263 
8264 /********************************************************************
8265  *			Basic setup of slabs
8266  *******************************************************************/
8267 
8268 /*
8269  * Used for early kmem_cache structures that were allocated using
8270  * the page allocator. Allocate them properly then fix up the pointers
8271  * that may be pointing to the wrong kmem_cache structure.
8272  */
8273 
8274 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
8275 {
8276 	int node;
8277 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
8278 	struct kmem_cache_node *n;
8279 
8280 	memcpy(s, static_cache, kmem_cache->object_size);
8281 
8282 	for_each_kmem_cache_node(s, node, n) {
8283 		struct slab *p;
8284 
8285 		list_for_each_entry(p, &n->partial, slab_list)
8286 			p->slab_cache = s;
8287 
8288 #ifdef CONFIG_SLUB_DEBUG
8289 		list_for_each_entry(p, &n->full, slab_list)
8290 			p->slab_cache = s;
8291 #endif
8292 	}
8293 	list_add(&s->list, &slab_caches);
8294 	return s;
8295 }
8296 
8297 /*
8298  * Finish the sheaves initialization done normally by init_percpu_sheaves() and
8299  * init_kmem_cache_nodes(). For normal kmalloc caches we have to bootstrap it
8300  * since sheaves and barns are allocated by kmalloc.
8301  */
8302 static void __init bootstrap_cache_sheaves(struct kmem_cache *s)
8303 {
8304 	struct kmem_cache_args empty_args = {};
8305 	unsigned int capacity;
8306 	bool failed = false;
8307 	int node, cpu;
8308 
8309 	capacity = calculate_sheaf_capacity(s, &empty_args);
8310 
8311 	/* capacity can be 0 due to debugging or SLUB_TINY */
8312 	if (!capacity)
8313 		return;
8314 
8315 	for_each_node_mask(node, slab_nodes) {
8316 		struct node_barn *barn;
8317 
8318 		barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
8319 
8320 		if (!barn) {
8321 			failed = true;
8322 			goto out;
8323 		}
8324 
8325 		barn_init(barn);
8326 		get_node(s, node)->barn = barn;
8327 	}
8328 
8329 	for_each_possible_cpu(cpu) {
8330 		struct slub_percpu_sheaves *pcs;
8331 
8332 		pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
8333 
8334 		pcs->main = __alloc_empty_sheaf(s, GFP_KERNEL, capacity);
8335 
8336 		if (!pcs->main) {
8337 			failed = true;
8338 			break;
8339 		}
8340 	}
8341 
8342 out:
8343 	/*
8344 	 * It's still early in boot so treat this the same as a failure to
8345 	 * create the kmalloc cache in the first place
8346 	 */
8347 	if (failed)
8348 		panic("Out of memory when creating kmem_cache %s\n", s->name);
8349 
8350 	s->sheaf_capacity = capacity;
8351 }
8352 
8353 static void __init bootstrap_kmalloc_sheaves(void)
8354 {
8355 	enum kmalloc_cache_type type;
8356 
8357 	for (type = KMALLOC_NORMAL; type <= KMALLOC_RANDOM_END; type++) {
8358 		for (int idx = 0; idx < KMALLOC_SHIFT_HIGH + 1; idx++) {
8359 			if (kmalloc_caches[type][idx])
8360 				bootstrap_cache_sheaves(kmalloc_caches[type][idx]);
8361 		}
8362 	}
8363 }
8364 
8365 void __init kmem_cache_init(void)
8366 {
8367 	static __initdata struct kmem_cache boot_kmem_cache,
8368 		boot_kmem_cache_node;
8369 	int node;
8370 
8371 	if (debug_guardpage_minorder())
8372 		slub_max_order = 0;
8373 
8374 	/* Inform pointer hashing choice about slub debugging state. */
8375 	hash_pointers_finalize(__slub_debug_enabled());
8376 
8377 	kmem_cache_node = &boot_kmem_cache_node;
8378 	kmem_cache = &boot_kmem_cache;
8379 
8380 	/*
8381 	 * Initialize the nodemask for which we will allocate per node
8382 	 * structures. Here we don't need taking slab_mutex yet.
8383 	 */
8384 	for_each_node_state(node, N_MEMORY)
8385 		node_set(node, slab_nodes);
8386 
8387 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
8388 			sizeof(struct kmem_cache_node),
8389 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
8390 
8391 	hotplug_node_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
8392 
8393 	/* Able to allocate the per node structures */
8394 	slab_state = PARTIAL;
8395 
8396 	create_boot_cache(kmem_cache, "kmem_cache",
8397 			offsetof(struct kmem_cache, node) +
8398 				nr_node_ids * sizeof(struct kmem_cache_node *),
8399 			SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
8400 
8401 	kmem_cache = bootstrap(&boot_kmem_cache);
8402 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
8403 
8404 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
8405 	setup_kmalloc_cache_index_table();
8406 	create_kmalloc_caches();
8407 
8408 	bootstrap_kmalloc_sheaves();
8409 
8410 	/* Setup random freelists for each cache */
8411 	init_freelist_randomization();
8412 
8413 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
8414 				  slub_cpu_dead);
8415 
8416 	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
8417 		cache_line_size(),
8418 		slub_min_order, slub_max_order, slub_min_objects,
8419 		nr_cpu_ids, nr_node_ids);
8420 }
8421 
8422 void __init kmem_cache_init_late(void)
8423 {
8424 	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM | WQ_PERCPU,
8425 				  0);
8426 	WARN_ON(!flushwq);
8427 #ifdef CONFIG_SLAB_FREELIST_RANDOM
8428 	prandom_init_once(&slab_rnd_state);
8429 #endif
8430 }
8431 
8432 int do_kmem_cache_create(struct kmem_cache *s, const char *name,
8433 			 unsigned int size, struct kmem_cache_args *args,
8434 			 slab_flags_t flags)
8435 {
8436 	int err = -EINVAL;
8437 
8438 	s->name = name;
8439 	s->size = s->object_size = size;
8440 
8441 	s->flags = kmem_cache_flags(flags, s->name);
8442 #ifdef CONFIG_SLAB_FREELIST_HARDENED
8443 	s->random = get_random_long();
8444 #endif
8445 	s->align = args->align;
8446 	s->ctor = args->ctor;
8447 #ifdef CONFIG_HARDENED_USERCOPY
8448 	s->useroffset = args->useroffset;
8449 	s->usersize = args->usersize;
8450 #endif
8451 
8452 	if (!calculate_sizes(args, s))
8453 		goto out;
8454 	if (disable_higher_order_debug) {
8455 		/*
8456 		 * Disable debugging flags that store metadata if the min slab
8457 		 * order increased.
8458 		 */
8459 		if (get_order(s->size) > get_order(s->object_size)) {
8460 			s->flags &= ~DEBUG_METADATA_FLAGS;
8461 			s->offset = 0;
8462 			if (!calculate_sizes(args, s))
8463 				goto out;
8464 		}
8465 	}
8466 
8467 #ifdef system_has_freelist_aba
8468 	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
8469 		/* Enable fast mode */
8470 		s->flags |= __CMPXCHG_DOUBLE;
8471 	}
8472 #endif
8473 
8474 	/*
8475 	 * The larger the object size is, the more slabs we want on the partial
8476 	 * list to avoid pounding the page allocator excessively.
8477 	 */
8478 	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
8479 	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
8480 
8481 	s->cpu_sheaves = alloc_percpu(struct slub_percpu_sheaves);
8482 	if (!s->cpu_sheaves) {
8483 		err = -ENOMEM;
8484 		goto out;
8485 	}
8486 
8487 #ifdef CONFIG_NUMA
8488 	s->remote_node_defrag_ratio = 1000;
8489 #endif
8490 
8491 	/* Initialize the pre-computed randomized freelist if slab is up */
8492 	if (slab_state >= UP) {
8493 		if (init_cache_random_seq(s))
8494 			goto out;
8495 	}
8496 
8497 	if (!init_kmem_cache_nodes(s))
8498 		goto out;
8499 
8500 #ifdef CONFIG_SLUB_STATS
8501 	if (!alloc_kmem_cache_stats(s))
8502 		goto out;
8503 #endif
8504 
8505 	err = init_percpu_sheaves(s);
8506 	if (err)
8507 		goto out;
8508 
8509 	err = 0;
8510 
8511 	/* Mutex is not taken during early boot */
8512 	if (slab_state <= UP)
8513 		goto out;
8514 
8515 	/*
8516 	 * Failing to create sysfs files is not critical to SLUB functionality.
8517 	 * If it fails, proceed with cache creation without these files.
8518 	 */
8519 	if (sysfs_slab_add(s))
8520 		pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name);
8521 
8522 	if (s->flags & SLAB_STORE_USER)
8523 		debugfs_slab_add(s);
8524 
8525 out:
8526 	if (err)
8527 		__kmem_cache_release(s);
8528 	return err;
8529 }
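
/*
 * Illustrative sketch of how a cache reaches this function (name, object
 * type and flags are hypothetical): a subsystem calls the public creation
 * API and the common slab code funnels the request here:
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */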
8530 
8531 #ifdef SLAB_SUPPORTS_SYSFS
8532 static int count_inuse(struct slab *slab)
8533 {
8534 	return slab->inuse;
8535 }
8536 
8537 static int count_total(struct slab *slab)
8538 {
8539 	return slab->objects;
8540 }
8541 #endif
8542 
8543 #ifdef CONFIG_SLUB_DEBUG
8544 static void validate_slab(struct kmem_cache *s, struct slab *slab,
8545 			  unsigned long *obj_map)
8546 {
8547 	void *p;
8548 	void *addr = slab_address(slab);
8549 
8550 	if (!validate_slab_ptr(slab)) {
8551 		slab_err(s, slab, "Not a valid slab page");
8552 		return;
8553 	}
8554 
8555 	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
8556 		return;
8557 
8558 	/* Now we know that a valid freelist exists */
8559 	__fill_map(obj_map, s, slab);
8560 	for_each_object(p, s, addr, slab->objects) {
8561 		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
8562 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
8563 
8564 		if (!check_object(s, slab, p, val))
8565 			break;
8566 	}
8567 }
8568 
8569 static int validate_slab_node(struct kmem_cache *s,
8570 		struct kmem_cache_node *n, unsigned long *obj_map)
8571 {
8572 	unsigned long count = 0;
8573 	struct slab *slab;
8574 	unsigned long flags;
8575 
8576 	spin_lock_irqsave(&n->list_lock, flags);
8577 
8578 	list_for_each_entry(slab, &n->partial, slab_list) {
8579 		validate_slab(s, slab, obj_map);
8580 		count++;
8581 	}
8582 	if (count != n->nr_partial) {
8583 		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
8584 		       s->name, count, n->nr_partial);
8585 		slab_add_kunit_errors();
8586 	}
8587 
8588 	if (!(s->flags & SLAB_STORE_USER))
8589 		goto out;
8590 
8591 	list_for_each_entry(slab, &n->full, slab_list) {
8592 		validate_slab(s, slab, obj_map);
8593 		count++;
8594 	}
8595 	if (count != node_nr_slabs(n)) {
8596 		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
8597 		       s->name, count, node_nr_slabs(n));
8598 		slab_add_kunit_errors();
8599 	}
8600 
8601 out:
8602 	spin_unlock_irqrestore(&n->list_lock, flags);
8603 	return count;
8604 }
8605 
8606 long validate_slab_cache(struct kmem_cache *s)
8607 {
8608 	int node;
8609 	unsigned long count = 0;
8610 	struct kmem_cache_node *n;
8611 	unsigned long *obj_map;
8612 
8613 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
8614 	if (!obj_map)
8615 		return -ENOMEM;
8616 
8617 	flush_all(s);
8618 	for_each_kmem_cache_node(s, node, n)
8619 		count += validate_slab_node(s, n, obj_map);
8620 
8621 	bitmap_free(obj_map);
8622 
8623 	return count;
8624 }
8625 EXPORT_SYMBOL(validate_slab_cache);
8626 
8627 #ifdef CONFIG_DEBUG_FS
8628 /*
8629  * Generate lists of code addresses where slabcache objects are allocated
8630  * and freed.
8631  */
8632 
8633 struct location {
8634 	depot_stack_handle_t handle;
8635 	unsigned long count;
8636 	unsigned long addr;
8637 	unsigned long waste;
8638 	long long sum_time;
8639 	long min_time;
8640 	long max_time;
8641 	long min_pid;
8642 	long max_pid;
8643 	DECLARE_BITMAP(cpus, NR_CPUS);
8644 	nodemask_t nodes;
8645 };
8646 
8647 struct loc_track {
8648 	unsigned long max;
8649 	unsigned long count;
8650 	struct location *loc;
8651 	loff_t idx;
8652 };
8653 
8654 static struct dentry *slab_debugfs_root;
8655 
8656 static void free_loc_track(struct loc_track *t)
8657 {
8658 	if (t->max)
8659 		free_pages((unsigned long)t->loc,
8660 			get_order(sizeof(struct location) * t->max));
8661 }
8662 
8663 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
8664 {
8665 	struct location *l;
8666 	int order;
8667 
8668 	order = get_order(sizeof(struct location) * max);
8669 
8670 	l = (void *)__get_free_pages(flags, order);
8671 	if (!l)
8672 		return 0;
8673 
8674 	if (t->count) {
8675 		memcpy(l, t->loc, sizeof(struct location) * t->count);
8676 		free_loc_track(t);
8677 	}
8678 	t->max = max;
8679 	t->loc = l;
8680 	return 1;
8681 }
8682 
8683 static int add_location(struct loc_track *t, struct kmem_cache *s,
8684 				const struct track *track,
8685 				unsigned int orig_size)
8686 {
8687 	long start, end, pos;
8688 	struct location *l;
8689 	unsigned long caddr, chandle, cwaste;
8690 	unsigned long age = jiffies - track->when;
8691 	depot_stack_handle_t handle = 0;
8692 	unsigned int waste = s->object_size - orig_size;
8693 
8694 #ifdef CONFIG_STACKDEPOT
8695 	handle = READ_ONCE(track->handle);
8696 #endif
8697 	start = -1;
8698 	end = t->count;
8699 
8700 	for ( ; ; ) {
8701 		pos = start + (end - start + 1) / 2;
8702 
8703 		/*
8704 		 * There is nothing at "end". If we end up there,
8705 		 * the new element must be inserted before "end".
8706 		 */
8707 		if (pos == end)
8708 			break;
8709 
8710 		l = &t->loc[pos];
8711 		caddr = l->addr;
8712 		chandle = l->handle;
8713 		cwaste = l->waste;
8714 		if ((track->addr == caddr) && (handle == chandle) &&
8715 			(waste == cwaste)) {
8716 
8717 			l->count++;
8718 			if (track->when) {
8719 				l->sum_time += age;
8720 				if (age < l->min_time)
8721 					l->min_time = age;
8722 				if (age > l->max_time)
8723 					l->max_time = age;
8724 
8725 				if (track->pid < l->min_pid)
8726 					l->min_pid = track->pid;
8727 				if (track->pid > l->max_pid)
8728 					l->max_pid = track->pid;
8729 
8730 				cpumask_set_cpu(track->cpu,
8731 						to_cpumask(l->cpus));
8732 			}
8733 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
8734 			return 1;
8735 		}
8736 
8737 		if (track->addr < caddr)
8738 			end = pos;
8739 		else if (track->addr == caddr && handle < chandle)
8740 			end = pos;
8741 		else if (track->addr == caddr && handle == chandle &&
8742 				waste < cwaste)
8743 			end = pos;
8744 		else
8745 			start = pos;
8746 	}
8747 
8748 	/*
8749 	 * Not found. Insert new tracking element.
8750 	 */
8751 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
8752 		return 0;
8753 
8754 	l = t->loc + pos;
8755 	if (pos < t->count)
8756 		memmove(l + 1, l,
8757 			(t->count - pos) * sizeof(struct location));
8758 	t->count++;
8759 	l->count = 1;
8760 	l->addr = track->addr;
8761 	l->sum_time = age;
8762 	l->min_time = age;
8763 	l->max_time = age;
8764 	l->min_pid = track->pid;
8765 	l->max_pid = track->pid;
8766 	l->handle = handle;
8767 	l->waste = waste;
8768 	cpumask_clear(to_cpumask(l->cpus));
8769 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
8770 	nodes_clear(l->nodes);
8771 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
8772 	return 1;
8773 }
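/*
 * Illustrative note, not part of the original source: t->loc is kept sorted
 * by (addr, handle, waste) and the loop above is a binary search over that
 * ordering. A repeated allocation site therefore only bumps l->count and the
 * age/pid/cpu statistics, while a previously unseen (addr, handle, waste)
 * triple is inserted at "pos", with memmove() shifting the tail of the array.
 */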
8774 
8775 static void process_slab(struct loc_track *t, struct kmem_cache *s,
8776 		struct slab *slab, enum track_item alloc,
8777 		unsigned long *obj_map)
8778 {
8779 	void *addr = slab_address(slab);
8780 	bool is_alloc = (alloc == TRACK_ALLOC);
8781 	void *p;
8782 
8783 	__fill_map(obj_map, s, slab);
8784 
8785 	for_each_object(p, s, addr, slab->objects)
8786 		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
8787 			add_location(t, s, get_track(s, p, alloc),
8788 				     is_alloc ? get_orig_size(s, p) :
8789 						s->object_size);
8790 }
8791 #endif  /* CONFIG_DEBUG_FS   */
8792 #endif	/* CONFIG_SLUB_DEBUG */
8793 
8794 #ifdef SLAB_SUPPORTS_SYSFS
8795 enum slab_stat_type {
8796 	SL_ALL,			/* All slabs */
8797 	SL_PARTIAL,		/* Only partially allocated slabs */
8798 	SL_CPU,			/* Only slabs used for cpu caches */
8799 	SL_OBJECTS,		/* Determine allocated objects not slabs */
8800 	SL_TOTAL		/* Determine object capacity not slabs */
8801 };
8802 
8803 #define SO_ALL		(1 << SL_ALL)
8804 #define SO_PARTIAL	(1 << SL_PARTIAL)
8805 #define SO_CPU		(1 << SL_CPU)
8806 #define SO_OBJECTS	(1 << SL_OBJECTS)
8807 #define SO_TOTAL	(1 << SL_TOTAL)
8808 
8809 static ssize_t show_slab_objects(struct kmem_cache *s,
8810 				 char *buf, unsigned long flags)
8811 {
8812 	unsigned long total = 0;
8813 	int node;
8814 	int x;
8815 	unsigned long *nodes;
8816 	int len = 0;
8817 
8818 	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
8819 	if (!nodes)
8820 		return -ENOMEM;
8821 
8822 	/*
8823 	 * We cannot take "mem_hotplug_lock" here with "kernfs_mutex" already
8824 	 * held, as that would conflict with the existing lock order:
8825 	 *
8826 	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
8827 	 *
8828 	 * We don't really need mem_hotplug_lock (to hold off
8829 	 * slab_mem_going_offline_callback) here because slab's memory hot
8830 	 * unplug code doesn't destroy the kmem_cache->node[] data.
8831 	 */
8832 
8833 #ifdef CONFIG_SLUB_DEBUG
8834 	if (flags & SO_ALL) {
8835 		struct kmem_cache_node *n;
8836 
8837 		for_each_kmem_cache_node(s, node, n) {
8838 
8839 			if (flags & SO_TOTAL)
8840 				x = node_nr_objs(n);
8841 			else if (flags & SO_OBJECTS)
8842 				x = node_nr_objs(n) - count_partial(n, count_free);
8843 			else
8844 				x = node_nr_slabs(n);
8845 			total += x;
8846 			nodes[node] += x;
8847 		}
8848 
8849 	} else
8850 #endif
8851 	if (flags & SO_PARTIAL) {
8852 		struct kmem_cache_node *n;
8853 
8854 		for_each_kmem_cache_node(s, node, n) {
8855 			if (flags & SO_TOTAL)
8856 				x = count_partial(n, count_total);
8857 			else if (flags & SO_OBJECTS)
8858 				x = count_partial(n, count_inuse);
8859 			else
8860 				x = n->nr_partial;
8861 			total += x;
8862 			nodes[node] += x;
8863 		}
8864 	}
8865 
8866 	len += sysfs_emit_at(buf, len, "%lu", total);
8867 #ifdef CONFIG_NUMA
8868 	for (node = 0; node < nr_node_ids; node++) {
8869 		if (nodes[node])
8870 			len += sysfs_emit_at(buf, len, " N%d=%lu",
8871 					     node, nodes[node]);
8872 	}
8873 #endif
8874 	len += sysfs_emit_at(buf, len, "\n");
8875 	kfree(nodes);
8876 
8877 	return len;
8878 }
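/*
 * Example of the emitted format (hypothetical numbers): on a two-node system
 * this might produce "128 N0=80 N1=48\n", i.e. the total first, followed by
 * one " N<node>=<count>" pair for every node with a non-zero count.
 */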
8879 
8880 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
8881 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
8882 
8883 struct slab_attribute {
8884 	struct attribute attr;
8885 	ssize_t (*show)(struct kmem_cache *s, char *buf);
8886 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
8887 };
8888 
8889 #define SLAB_ATTR_RO(_name) \
8890 	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
8891 
8892 #define SLAB_ATTR(_name) \
8893 	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
8894 
8895 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
8896 {
8897 	return sysfs_emit(buf, "%u\n", s->size);
8898 }
8899 SLAB_ATTR_RO(slab_size);
8900 
8901 static ssize_t align_show(struct kmem_cache *s, char *buf)
8902 {
8903 	return sysfs_emit(buf, "%u\n", s->align);
8904 }
8905 SLAB_ATTR_RO(align);
8906 
8907 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
8908 {
8909 	return sysfs_emit(buf, "%u\n", s->object_size);
8910 }
8911 SLAB_ATTR_RO(object_size);
8912 
8913 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
8914 {
8915 	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
8916 }
8917 SLAB_ATTR_RO(objs_per_slab);
8918 
8919 static ssize_t order_show(struct kmem_cache *s, char *buf)
8920 {
8921 	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
8922 }
8923 SLAB_ATTR_RO(order);
8924 
8925 static ssize_t sheaf_capacity_show(struct kmem_cache *s, char *buf)
8926 {
8927 	return sysfs_emit(buf, "%u\n", s->sheaf_capacity);
8928 }
8929 SLAB_ATTR_RO(sheaf_capacity);
8930 
8931 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
8932 {
8933 	return sysfs_emit(buf, "%lu\n", s->min_partial);
8934 }
8935 
8936 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
8937 				 size_t length)
8938 {
8939 	unsigned long min;
8940 	int err;
8941 
8942 	err = kstrtoul(buf, 10, &min);
8943 	if (err)
8944 		return err;
8945 
8946 	s->min_partial = min;
8947 	return length;
8948 }
8949 SLAB_ATTR(min_partial);
8950 
8951 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
8952 {
8953 	return sysfs_emit(buf, "0\n");
8954 }
8955 
8956 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
8957 				 size_t length)
8958 {
8959 	unsigned int objects;
8960 	int err;
8961 
8962 	err = kstrtouint(buf, 10, &objects);
8963 	if (err)
8964 		return err;
8965 	if (objects)
8966 		return -EINVAL;
8967 
8968 	return length;
8969 }
8970 SLAB_ATTR(cpu_partial);
8971 
8972 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
8973 {
8974 	if (!s->ctor)
8975 		return 0;
8976 	return sysfs_emit(buf, "%pS\n", s->ctor);
8977 }
8978 SLAB_ATTR_RO(ctor);
8979 
8980 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
8981 {
8982 	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
8983 }
8984 SLAB_ATTR_RO(aliases);
8985 
8986 static ssize_t partial_show(struct kmem_cache *s, char *buf)
8987 {
8988 	return show_slab_objects(s, buf, SO_PARTIAL);
8989 }
8990 SLAB_ATTR_RO(partial);
8991 
8992 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
8993 {
8994 	return show_slab_objects(s, buf, SO_CPU);
8995 }
8996 SLAB_ATTR_RO(cpu_slabs);
8997 
8998 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
8999 {
9000 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
9001 }
9002 SLAB_ATTR_RO(objects_partial);
9003 
9004 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
9005 {
9006 	return sysfs_emit(buf, "0(0)\n");
9007 }
9008 SLAB_ATTR_RO(slabs_cpu_partial);
9009 
9010 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
9011 {
9012 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
9013 }
9014 SLAB_ATTR_RO(reclaim_account);
9015 
9016 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
9017 {
9018 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
9019 }
9020 SLAB_ATTR_RO(hwcache_align);
9021 
9022 #ifdef CONFIG_ZONE_DMA
9023 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
9024 {
9025 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
9026 }
9027 SLAB_ATTR_RO(cache_dma);
9028 #endif
9029 
9030 #ifdef CONFIG_HARDENED_USERCOPY
9031 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
9032 {
9033 	return sysfs_emit(buf, "%u\n", s->usersize);
9034 }
9035 SLAB_ATTR_RO(usersize);
9036 #endif
9037 
9038 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
9039 {
9040 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
9041 }
9042 SLAB_ATTR_RO(destroy_by_rcu);
9043 
9044 #ifdef CONFIG_SLUB_DEBUG
9045 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
9046 {
9047 	return show_slab_objects(s, buf, SO_ALL);
9048 }
9049 SLAB_ATTR_RO(slabs);
9050 
9051 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
9052 {
9053 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
9054 }
9055 SLAB_ATTR_RO(total_objects);
9056 
9057 static ssize_t objects_show(struct kmem_cache *s, char *buf)
9058 {
9059 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
9060 }
9061 SLAB_ATTR_RO(objects);
9062 
9063 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
9064 {
9065 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
9066 }
9067 SLAB_ATTR_RO(sanity_checks);
9068 
9069 static ssize_t trace_show(struct kmem_cache *s, char *buf)
9070 {
9071 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
9072 }
9073 SLAB_ATTR_RO(trace);
9074 
9075 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
9076 {
9077 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
9078 }
9079 
9080 SLAB_ATTR_RO(red_zone);
9081 
9082 static ssize_t poison_show(struct kmem_cache *s, char *buf)
9083 {
9084 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
9085 }
9086 
9087 SLAB_ATTR_RO(poison);
9088 
9089 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
9090 {
9091 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
9092 }
9093 
9094 SLAB_ATTR_RO(store_user);
9095 
9096 static ssize_t validate_show(struct kmem_cache *s, char *buf)
9097 {
9098 	return 0;
9099 }
9100 
9101 static ssize_t validate_store(struct kmem_cache *s,
9102 			const char *buf, size_t length)
9103 {
9104 	int ret = -EINVAL;
9105 
9106 	if (buf[0] == '1' && kmem_cache_debug(s)) {
9107 		ret = validate_slab_cache(s);
9108 		if (ret >= 0)
9109 			ret = length;
9110 	}
9111 	return ret;
9112 }
9113 SLAB_ATTR(validate);
9114 
9115 #endif /* CONFIG_SLUB_DEBUG */
9116 
9117 #ifdef CONFIG_FAILSLAB
9118 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
9119 {
9120 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
9121 }
9122 
9123 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
9124 				size_t length)
9125 {
9126 	if (s->refcount > 1)
9127 		return -EINVAL;
9128 
9129 	if (buf[0] == '1')
9130 		WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
9131 	else
9132 		WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
9133 
9134 	return length;
9135 }
9136 SLAB_ATTR(failslab);
9137 #endif
9138 
9139 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
9140 {
9141 	return 0;
9142 }
9143 
9144 static ssize_t shrink_store(struct kmem_cache *s,
9145 			const char *buf, size_t length)
9146 {
9147 	if (buf[0] == '1')
9148 		kmem_cache_shrink(s);
9149 	else
9150 		return -EINVAL;
9151 	return length;
9152 }
9153 SLAB_ATTR(shrink);
9154 
9155 #ifdef CONFIG_NUMA
9156 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
9157 {
9158 	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
9159 }
9160 
9161 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
9162 				const char *buf, size_t length)
9163 {
9164 	unsigned int ratio;
9165 	int err;
9166 
9167 	err = kstrtouint(buf, 10, &ratio);
9168 	if (err)
9169 		return err;
9170 	if (ratio > 100)
9171 		return -ERANGE;
9172 
9173 	s->remote_node_defrag_ratio = ratio * 10;
9174 
9175 	return length;
9176 }
9177 SLAB_ATTR(remote_node_defrag_ratio);
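/*
 * Note (illustrative, not from the original source): the ratio is stored in
 * tenths of a percent, so the default of 1000 set at cache creation reads
 * back as 100 through sysfs, and writing e.g. 25 stores 250 internally.
 */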
9178 #endif
9179 
9180 #ifdef CONFIG_SLUB_STATS
9181 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
9182 {
9183 	unsigned long sum  = 0;
9184 	int cpu;
9185 	int len = 0;
9186 	int *data = kmalloc_objs(int, nr_cpu_ids);
9187 
9188 	if (!data)
9189 		return -ENOMEM;
9190 
9191 	for_each_online_cpu(cpu) {
9192 		unsigned int x = per_cpu_ptr(s->cpu_stats, cpu)->stat[si];
9193 
9194 		data[cpu] = x;
9195 		sum += x;
9196 	}
9197 
9198 	len += sysfs_emit_at(buf, len, "%lu", sum);
9199 
9200 #ifdef CONFIG_SMP
9201 	for_each_online_cpu(cpu) {
9202 		if (data[cpu])
9203 			len += sysfs_emit_at(buf, len, " C%d=%u",
9204 					     cpu, data[cpu]);
9205 	}
9206 #endif
9207 	kfree(data);
9208 	len += sysfs_emit_at(buf, len, "\n");
9209 
9210 	return len;
9211 }
9212 
9213 static void clear_stat(struct kmem_cache *s, enum stat_item si)
9214 {
9215 	int cpu;
9216 
9217 	for_each_online_cpu(cpu)
9218 		per_cpu_ptr(s->cpu_stats, cpu)->stat[si] = 0;
9219 }
9220 
9221 #define STAT_ATTR(si, text) 					\
9222 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
9223 {								\
9224 	return show_stat(s, buf, si);				\
9225 }								\
9226 static ssize_t text##_store(struct kmem_cache *s,		\
9227 				const char *buf, size_t length)	\
9228 {								\
9229 	if (buf[0] != '0')					\
9230 		return -EINVAL;					\
9231 	clear_stat(s, si);					\
9232 	return length;						\
9233 }								\
9234 SLAB_ATTR(text);						\
9235 
9236 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
9237 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
9238 STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf);
9239 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
9240 STAT_ATTR(FREE_FASTPATH, free_fastpath);
9241 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
9242 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
9243 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
9244 STAT_ATTR(ALLOC_SLAB, alloc_slab);
9245 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
9246 STAT_ATTR(FREE_SLAB, free_slab);
9247 STAT_ATTR(ORDER_FALLBACK, order_fallback);
9248 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
9249 STAT_ATTR(SHEAF_FLUSH, sheaf_flush);
9250 STAT_ATTR(SHEAF_REFILL, sheaf_refill);
9251 STAT_ATTR(SHEAF_ALLOC, sheaf_alloc);
9252 STAT_ATTR(SHEAF_FREE, sheaf_free);
9253 STAT_ATTR(BARN_GET, barn_get);
9254 STAT_ATTR(BARN_GET_FAIL, barn_get_fail);
9255 STAT_ATTR(BARN_PUT, barn_put);
9256 STAT_ATTR(BARN_PUT_FAIL, barn_put_fail);
9257 STAT_ATTR(SHEAF_PREFILL_FAST, sheaf_prefill_fast);
9258 STAT_ATTR(SHEAF_PREFILL_SLOW, sheaf_prefill_slow);
9259 STAT_ATTR(SHEAF_PREFILL_OVERSIZE, sheaf_prefill_oversize);
9260 STAT_ATTR(SHEAF_RETURN_FAST, sheaf_return_fast);
9261 STAT_ATTR(SHEAF_RETURN_SLOW, sheaf_return_slow);
9262 #endif	/* CONFIG_SLUB_STATS */
9263 
9264 #ifdef CONFIG_KFENCE
9265 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
9266 {
9267 	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
9268 }
9269 
9270 static ssize_t skip_kfence_store(struct kmem_cache *s,
9271 			const char *buf, size_t length)
9272 {
9273 	int ret = length;
9274 
9275 	if (buf[0] == '0')
9276 		s->flags &= ~SLAB_SKIP_KFENCE;
9277 	else if (buf[0] == '1')
9278 		s->flags |= SLAB_SKIP_KFENCE;
9279 	else
9280 		ret = -EINVAL;
9281 
9282 	return ret;
9283 }
9284 SLAB_ATTR(skip_kfence);
9285 #endif
9286 
9287 static struct attribute *slab_attrs[] = {
9288 	&slab_size_attr.attr,
9289 	&object_size_attr.attr,
9290 	&objs_per_slab_attr.attr,
9291 	&order_attr.attr,
9292 	&sheaf_capacity_attr.attr,
9293 	&min_partial_attr.attr,
9294 	&cpu_partial_attr.attr,
9295 	&objects_partial_attr.attr,
9296 	&partial_attr.attr,
9297 	&cpu_slabs_attr.attr,
9298 	&ctor_attr.attr,
9299 	&aliases_attr.attr,
9300 	&align_attr.attr,
9301 	&hwcache_align_attr.attr,
9302 	&reclaim_account_attr.attr,
9303 	&destroy_by_rcu_attr.attr,
9304 	&shrink_attr.attr,
9305 	&slabs_cpu_partial_attr.attr,
9306 #ifdef CONFIG_SLUB_DEBUG
9307 	&total_objects_attr.attr,
9308 	&objects_attr.attr,
9309 	&slabs_attr.attr,
9310 	&sanity_checks_attr.attr,
9311 	&trace_attr.attr,
9312 	&red_zone_attr.attr,
9313 	&poison_attr.attr,
9314 	&store_user_attr.attr,
9315 	&validate_attr.attr,
9316 #endif
9317 #ifdef CONFIG_ZONE_DMA
9318 	&cache_dma_attr.attr,
9319 #endif
9320 #ifdef CONFIG_NUMA
9321 	&remote_node_defrag_ratio_attr.attr,
9322 #endif
9323 #ifdef CONFIG_SLUB_STATS
9324 	&alloc_fastpath_attr.attr,
9325 	&alloc_slowpath_attr.attr,
9326 	&free_rcu_sheaf_attr.attr,
9327 	&free_rcu_sheaf_fail_attr.attr,
9328 	&free_fastpath_attr.attr,
9329 	&free_slowpath_attr.attr,
9330 	&free_add_partial_attr.attr,
9331 	&free_remove_partial_attr.attr,
9332 	&alloc_slab_attr.attr,
9333 	&alloc_node_mismatch_attr.attr,
9334 	&free_slab_attr.attr,
9335 	&order_fallback_attr.attr,
9336 	&cmpxchg_double_fail_attr.attr,
9337 	&sheaf_flush_attr.attr,
9338 	&sheaf_refill_attr.attr,
9339 	&sheaf_alloc_attr.attr,
9340 	&sheaf_free_attr.attr,
9341 	&barn_get_attr.attr,
9342 	&barn_get_fail_attr.attr,
9343 	&barn_put_attr.attr,
9344 	&barn_put_fail_attr.attr,
9345 	&sheaf_prefill_fast_attr.attr,
9346 	&sheaf_prefill_slow_attr.attr,
9347 	&sheaf_prefill_oversize_attr.attr,
9348 	&sheaf_return_fast_attr.attr,
9349 	&sheaf_return_slow_attr.attr,
9350 #endif
9351 #ifdef CONFIG_FAILSLAB
9352 	&failslab_attr.attr,
9353 #endif
9354 #ifdef CONFIG_HARDENED_USERCOPY
9355 	&usersize_attr.attr,
9356 #endif
9357 #ifdef CONFIG_KFENCE
9358 	&skip_kfence_attr.attr,
9359 #endif
9360 
9361 	NULL
9362 };
9363 
9364 static const struct attribute_group slab_attr_group = {
9365 	.attrs = slab_attrs,
9366 };
9367 
9368 static ssize_t slab_attr_show(struct kobject *kobj,
9369 				struct attribute *attr,
9370 				char *buf)
9371 {
9372 	struct slab_attribute *attribute;
9373 	struct kmem_cache *s;
9374 
9375 	attribute = to_slab_attr(attr);
9376 	s = to_slab(kobj);
9377 
9378 	if (!attribute->show)
9379 		return -EIO;
9380 
9381 	return attribute->show(s, buf);
9382 }
9383 
9384 static ssize_t slab_attr_store(struct kobject *kobj,
9385 				struct attribute *attr,
9386 				const char *buf, size_t len)
9387 {
9388 	struct slab_attribute *attribute;
9389 	struct kmem_cache *s;
9390 
9391 	attribute = to_slab_attr(attr);
9392 	s = to_slab(kobj);
9393 
9394 	if (!attribute->store)
9395 		return -EIO;
9396 
9397 	return attribute->store(s, buf, len);
9398 }
9399 
9400 static void kmem_cache_release(struct kobject *k)
9401 {
9402 	slab_kmem_cache_release(to_slab(k));
9403 }
9404 
9405 static const struct sysfs_ops slab_sysfs_ops = {
9406 	.show = slab_attr_show,
9407 	.store = slab_attr_store,
9408 };
9409 
9410 static const struct kobj_type slab_ktype = {
9411 	.sysfs_ops = &slab_sysfs_ops,
9412 	.release = kmem_cache_release,
9413 };
9414 
9415 static struct kset *slab_kset;
9416 
9417 static inline struct kset *cache_kset(struct kmem_cache *s)
9418 {
9419 	return slab_kset;
9420 }
9421 
9422 #define ID_STR_LENGTH 32
9423 
9424 /* Create a unique string id for a slab cache:
9425  *
9426  * Format	:[flags-]size
9427  */
9428 static char *create_unique_id(struct kmem_cache *s)
9429 {
9430 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
9431 	char *p = name;
9432 
9433 	if (!name)
9434 		return ERR_PTR(-ENOMEM);
9435 
9436 	*p++ = ':';
9437 	/*
9438 	 * First flags affecting slabcache operations. We will only
9439 	 * get here for aliasable slabs so we do not need to support
9440 	 * too many flags. The flags here must cover all flags that
9441 	 * are matched during merging to guarantee that the id is
9442 	 * unique.
9443 	 */
9444 	if (s->flags & SLAB_CACHE_DMA)
9445 		*p++ = 'd';
9446 	if (s->flags & SLAB_CACHE_DMA32)
9447 		*p++ = 'D';
9448 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
9449 		*p++ = 'a';
9450 	if (s->flags & SLAB_CONSISTENCY_CHECKS)
9451 		*p++ = 'F';
9452 	if (s->flags & SLAB_ACCOUNT)
9453 		*p++ = 'A';
9454 	if (p != name + 1)
9455 		*p++ = '-';
9456 	p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
9457 
9458 	if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
9459 		kfree(name);
9460 		return ERR_PTR(-EINVAL);
9461 	}
9462 	kmsan_unpoison_memory(name, p - name);
9463 	return name;
9464 }
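/*
 * Illustrative example (hypothetical cache, not from the original source): a
 * SLAB_CACHE_DMA | SLAB_RECLAIM_ACCOUNT cache of size 192 would get the id
 * ":da-0000192", while a cache with none of the listed flags and size 1024
 * would get ":0001024", without the '-' separator because no flag characters
 * were emitted.
 */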
9465 
9466 static int sysfs_slab_add(struct kmem_cache *s)
9467 {
9468 	int err;
9469 	const char *name;
9470 	struct kset *kset = cache_kset(s);
9471 	int unmergeable = slab_unmergeable(s);
9472 
9473 	if (!unmergeable && disable_higher_order_debug &&
9474 			(slub_debug & DEBUG_METADATA_FLAGS))
9475 		unmergeable = 1;
9476 
9477 	if (unmergeable) {
9478 		/*
9479 		 * The slabcache can never be merged, so we can use its name as-is.
9480 		 * This is typically the case for debug situations. In that
9481 		 * case we can catch duplicate names easily.
9482 		 */
9483 		sysfs_remove_link(&slab_kset->kobj, s->name);
9484 		name = s->name;
9485 	} else {
9486 		/*
9487 		 * Create a unique name for the slab as a target
9488 		 * for the symlinks.
9489 		 */
9490 		name = create_unique_id(s);
9491 		if (IS_ERR(name))
9492 			return PTR_ERR(name);
9493 	}
9494 
9495 	s->kobj.kset = kset;
9496 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
9497 	if (err)
9498 		goto out;
9499 
9500 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
9501 	if (err)
9502 		goto out_del_kobj;
9503 
9504 	if (!unmergeable) {
9505 		/* Setup first alias */
9506 		sysfs_slab_alias(s, s->name);
9507 	}
9508 out:
9509 	if (!unmergeable)
9510 		kfree(name);
9511 	return err;
9512 out_del_kobj:
9513 	kobject_del(&s->kobj);
9514 	goto out;
9515 }
9516 
9517 void sysfs_slab_unlink(struct kmem_cache *s)
9518 {
9519 	if (s->kobj.state_in_sysfs)
9520 		kobject_del(&s->kobj);
9521 }
9522 
9523 void sysfs_slab_release(struct kmem_cache *s)
9524 {
9525 	kobject_put(&s->kobj);
9526 }
9527 
9528 /*
9529  * Need to buffer aliases during bootup until sysfs becomes
9530  * available lest we lose that information.
9531  */
9532 struct saved_alias {
9533 	struct kmem_cache *s;
9534 	const char *name;
9535 	struct saved_alias *next;
9536 };
9537 
9538 static struct saved_alias *alias_list;
9539 
9540 int sysfs_slab_alias(struct kmem_cache *s, const char *name)
9541 {
9542 	struct saved_alias *al;
9543 
9544 	if (slab_state == FULL) {
9545 		/*
9546 		 * If we have a leftover link then remove it.
9547 		 */
9548 		sysfs_remove_link(&slab_kset->kobj, name);
9549 		/*
9550 		 * The original cache may have failed to create its sysfs files.
9551 		 * In that case, sysfs_create_link() returns -ENOENT and
9552 		 * symbolic link creation is skipped.
9553 		 */
9554 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
9555 	}
9556 
9557 	al = kmalloc_obj(struct saved_alias);
9558 	if (!al)
9559 		return -ENOMEM;
9560 
9561 	al->s = s;
9562 	al->name = name;
9563 	al->next = alias_list;
9564 	alias_list = al;
9565 	kmsan_unpoison_memory(al, sizeof(*al));
9566 	return 0;
9567 }
9568 
9569 static int __init slab_sysfs_init(void)
9570 {
9571 	struct kmem_cache *s;
9572 	int err;
9573 
9574 	mutex_lock(&slab_mutex);
9575 
9576 	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
9577 	if (!slab_kset) {
9578 		mutex_unlock(&slab_mutex);
9579 		pr_err("Cannot register slab subsystem.\n");
9580 		return -ENOMEM;
9581 	}
9582 
9583 	slab_state = FULL;
9584 
9585 	list_for_each_entry(s, &slab_caches, list) {
9586 		err = sysfs_slab_add(s);
9587 		if (err)
9588 			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
9589 			       s->name);
9590 	}
9591 
9592 	while (alias_list) {
9593 		struct saved_alias *al = alias_list;
9594 
9595 		alias_list = alias_list->next;
9596 		err = sysfs_slab_alias(al->s, al->name);
9597 		if (err)
9598 			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
9599 			       al->name);
9600 		kfree(al);
9601 	}
9602 
9603 	mutex_unlock(&slab_mutex);
9604 	return 0;
9605 }
9606 late_initcall(slab_sysfs_init);
9607 #endif /* SLAB_SUPPORTS_SYSFS */
9608 
9609 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
9610 static int slab_debugfs_show(struct seq_file *seq, void *v)
9611 {
9612 	struct loc_track *t = seq->private;
9613 	struct location *l;
9614 	unsigned long idx;
9615 
9616 	idx = (unsigned long) t->idx;
9617 	if (idx < t->count) {
9618 		l = &t->loc[idx];
9619 
9620 		seq_printf(seq, "%7ld ", l->count);
9621 
9622 		if (l->addr)
9623 			seq_printf(seq, "%pS", (void *)l->addr);
9624 		else
9625 			seq_puts(seq, "<not-available>");
9626 
9627 		if (l->waste)
9628 			seq_printf(seq, " waste=%lu/%lu",
9629 				l->count * l->waste, l->waste);
9630 
9631 		if (l->sum_time != l->min_time) {
9632 			seq_printf(seq, " age=%ld/%llu/%ld",
9633 				l->min_time, div_u64(l->sum_time, l->count),
9634 				l->max_time);
9635 		} else
9636 			seq_printf(seq, " age=%ld", l->min_time);
9637 
9638 		if (l->min_pid != l->max_pid)
9639 			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
9640 		else
9641 			seq_printf(seq, " pid=%ld",
9642 				l->min_pid);
9643 
9644 		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
9645 			seq_printf(seq, " cpus=%*pbl",
9646 				 cpumask_pr_args(to_cpumask(l->cpus)));
9647 
9648 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
9649 			seq_printf(seq, " nodes=%*pbl",
9650 				 nodemask_pr_args(&l->nodes));
9651 
9652 #ifdef CONFIG_STACKDEPOT
9653 		{
9654 			depot_stack_handle_t handle;
9655 			unsigned long *entries;
9656 			unsigned int nr_entries, j;
9657 
9658 			handle = READ_ONCE(l->handle);
9659 			if (handle) {
9660 				nr_entries = stack_depot_fetch(handle, &entries);
9661 				seq_puts(seq, "\n");
9662 				for (j = 0; j < nr_entries; j++)
9663 					seq_printf(seq, "        %pS\n", (void *)entries[j]);
9664 			}
9665 		}
9666 #endif
9667 		seq_puts(seq, "\n");
9668 	}
9669 
9670 	if (!idx && !t->count)
9671 		seq_puts(seq, "No data\n");
9672 
9673 	return 0;
9674 }
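/*
 * Illustrative reading aid (all values made up): a line such as
 * "   1234 some_alloc_site+0x40/0x80 age=3/1405/8904 pid=1-2048 cpus=0-3"
 * shows the hit count, the recorded call site, the min/avg/max object age,
 * the pid range and the cpu mask; "waste=", a node mask and the stack depot
 * backtrace are appended when the corresponding data is available.
 */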
9675 
9676 static void slab_debugfs_stop(struct seq_file *seq, void *v)
9677 {
9678 }
9679 
9680 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
9681 {
9682 	struct loc_track *t = seq->private;
9683 
9684 	t->idx = ++(*ppos);
9685 	if (*ppos <= t->count)
9686 		return ppos;
9687 
9688 	return NULL;
9689 }
9690 
9691 static int cmp_loc_by_count(const void *a, const void *b)
9692 {
9693 	struct location *loc1 = (struct location *)a;
9694 	struct location *loc2 = (struct location *)b;
9695 
9696 	return cmp_int(loc2->count, loc1->count);
9697 }
9698 
9699 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
9700 {
9701 	struct loc_track *t = seq->private;
9702 
9703 	t->idx = *ppos;
9704 	return ppos;
9705 }
9706 
9707 static const struct seq_operations slab_debugfs_sops = {
9708 	.start  = slab_debugfs_start,
9709 	.next   = slab_debugfs_next,
9710 	.stop   = slab_debugfs_stop,
9711 	.show   = slab_debugfs_show,
9712 };
9713 
9714 static int slab_debug_trace_open(struct inode *inode, struct file *filep)
9715 {
9716 
9717 	struct kmem_cache_node *n;
9718 	enum track_item alloc;
9719 	int node;
9720 	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
9721 						sizeof(struct loc_track));
9722 	struct kmem_cache *s = file_inode(filep)->i_private;
9723 	unsigned long *obj_map;
9724 
9725 	if (!t)
9726 		return -ENOMEM;
9727 
9728 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
9729 	if (!obj_map) {
9730 		seq_release_private(inode, filep);
9731 		return -ENOMEM;
9732 	}
9733 
9734 	alloc = debugfs_get_aux_num(filep);
9735 
9736 	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
9737 		bitmap_free(obj_map);
9738 		seq_release_private(inode, filep);
9739 		return -ENOMEM;
9740 	}
9741 
9742 	for_each_kmem_cache_node(s, node, n) {
9743 		unsigned long flags;
9744 		struct slab *slab;
9745 
9746 		if (!node_nr_slabs(n))
9747 			continue;
9748 
9749 		spin_lock_irqsave(&n->list_lock, flags);
9750 		list_for_each_entry(slab, &n->partial, slab_list)
9751 			process_slab(t, s, slab, alloc, obj_map);
9752 		list_for_each_entry(slab, &n->full, slab_list)
9753 			process_slab(t, s, slab, alloc, obj_map);
9754 		spin_unlock_irqrestore(&n->list_lock, flags);
9755 	}
9756 
9757 	/* Sort locations by count */
9758 	sort(t->loc, t->count, sizeof(struct location),
9759 	     cmp_loc_by_count, NULL);
9760 
9761 	bitmap_free(obj_map);
9762 	return 0;
9763 }
9764 
9765 static int slab_debug_trace_release(struct inode *inode, struct file *file)
9766 {
9767 	struct seq_file *seq = file->private_data;
9768 	struct loc_track *t = seq->private;
9769 
9770 	free_loc_track(t);
9771 	return seq_release_private(inode, file);
9772 }
9773 
9774 static const struct file_operations slab_debugfs_fops = {
9775 	.open    = slab_debug_trace_open,
9776 	.read    = seq_read,
9777 	.llseek  = seq_lseek,
9778 	.release = slab_debug_trace_release,
9779 };
9780 
9781 static void debugfs_slab_add(struct kmem_cache *s)
9782 {
9783 	struct dentry *slab_cache_dir;
9784 
9785 	if (unlikely(!slab_debugfs_root))
9786 		return;
9787 
9788 	slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
9789 
9790 	debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s,
9791 					TRACK_ALLOC, &slab_debugfs_fops);
9792 
9793 	debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s,
9794 					TRACK_FREE, &slab_debugfs_fops);
9795 }
9796 
9797 void debugfs_slab_release(struct kmem_cache *s)
9798 {
9799 	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
9800 }
9801 
9802 static int __init slab_debugfs_init(void)
9803 {
9804 	struct kmem_cache *s;
9805 
9806 	slab_debugfs_root = debugfs_create_dir("slab", NULL);
9807 
9808 	list_for_each_entry(s, &slab_caches, list)
9809 		if (s->flags & SLAB_STORE_USER)
9810 			debugfs_slab_add(s);
9811 
9812 	return 0;
9813 
9814 }
9815 __initcall(slab_debugfs_init);
9816 #endif
9817 /*
9818  * The /proc/slabinfo ABI
9819  */
9820 #ifdef CONFIG_SLUB_DEBUG
9821 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
9822 {
9823 	unsigned long nr_slabs = 0;
9824 	unsigned long nr_objs = 0;
9825 	unsigned long nr_free = 0;
9826 	int node;
9827 	struct kmem_cache_node *n;
9828 
9829 	for_each_kmem_cache_node(s, node, n) {
9830 		nr_slabs += node_nr_slabs(n);
9831 		nr_objs += node_nr_objs(n);
9832 		nr_free += count_partial_free_approx(n);
9833 	}
9834 
9835 	sinfo->active_objs = nr_objs - nr_free;
9836 	sinfo->num_objs = nr_objs;
9837 	sinfo->active_slabs = nr_slabs;
9838 	sinfo->num_slabs = nr_slabs;
9839 	sinfo->objects_per_slab = oo_objects(s->oo);
9840 	sinfo->cache_order = oo_order(s->oo);
9841 }
9842 #endif /* CONFIG_SLUB_DEBUG */
9843