/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
	};
	struct kmem_cache *slab_cache;
	void *freelist;	/* array of free object indexes */
	void *s_mem;	/* first object */
	unsigned int active;

#elif defined(CONFIG_SLUB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
#ifdef CONFIG_SLUB_CPU_PARTIAL
		struct {
			struct slab *next;
			int slabs;	/* Nr of slabs left */
		};
#endif
	};
	struct kmem_cache *slab_cache;
	/* Double-word boundary */
	void *freelist;		/* first free object */
	union {
		unsigned long counters;
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
	unsigned int __unused;

#elif defined(CONFIG_SLOB)

	struct list_head slab_list;
	void *__unused_1;
	void *freelist;		/* first free block */
	long units;
	unsigned int __unused_2;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};
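
/*
 * Illustrative sketch (an assumed example, not code from this header):
 * under SLUB the inuse/objects/frozen bitfields above alias 'counters',
 * so all three can be snapshotted by copying that one word:
 *
 *	struct slab tmp;
 *
 *	tmp.counters = READ_ONCE(slab->counters);
 *	pr_debug("inuse=%u objects=%u frozen=%u\n",
 *		 tmp.inuse, tmp.objects, tmp.frozen);
 *
 * This aliasing is also what lets SLUB update freelist and counters
 * together with a single cmpxchg_double in mm/slub.c.
 */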

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
#ifndef CONFIG_SLOB
SLAB_MATCH(rcu_head, rcu_head);
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
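
/*
 * For illustration, the expansion of one of the assertions above:
 * SLAB_MATCH(flags, __page_flags) becomes
 *
 *	static_assert(offsetof(struct page, flags) ==
 *		      offsetof(struct slab, __page_flags));
 *
 * so any divergence between the two layouts fails the build instead of
 * corrupting state at run time.
 */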

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or a single order-0 page.
 *
 * Long-term, ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page.
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
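
/*
 * Usage sketch (illustrative; 'addr' is a hypothetical pointer into a
 * slab page) showing how the helpers above convert between the three
 * views of the same memory without open-coded casts:
 *
 *	struct folio *folio = virt_to_folio(addr);
 *	struct slab *slab = folio_slab(folio);
 *	struct page *page = slab_page(slab);
 *
 *	slab = page_slab(page);
 *
 * The _Generic() definitions preserve constness: a const argument yields
 * a const result.
 */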

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}
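
/*
 * Sketch of the expected call pattern (an assumption based on how the
 * allocators consume these helpers, not code from this header): right
 * after allocating the backing folio, the pfmemalloc state is propagated
 * so it can be checked cheaply later:
 *
 *	if (page_is_pfmemalloc(folio_page(folio, 0)))
 *		slab_set_pfmemalloc(folio_slab(folio));
 *
 * The state is stashed in the folio's PG_active bit, which is otherwise
 * unused for slab pages.
 */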

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
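
/*
 * Illustrative helper (a hypothetical example, not defined anywhere in
 * the kernel): slab_address() and slab_size() together bound the memory
 * covered by a slab, e.g. for a range check:
 *
 *	static inline bool slab_contains(const struct slab *slab,
 *					 const void *p)
 *	{
 *		void *base = slab_address(slab);
 *
 *		return p >= base && p < base + slab_size(slab);
 *	}
 */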

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size  */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
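
/*
 * As an illustration, slab_is_available() in mm/slab_common.c gates
 * early-boot slab use on this state and is essentially:
 *
 *	bool slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 */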

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
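
/*
 * Sketch of how the two masks are consumed (modelled on
 * kmem_cache_create_usercopy() in mm/slab_common.c; an illustration, not
 * the authoritative code):
 *
 *	// Refuse requests with allocator-specific flags.
 *	if (flags & ~SLAB_FLAGS_PERMITTED) {
 *		err = -EINVAL;
 *		goto out_unlock;
 *	}
 *
 *	// Keep only the flags this configuration supports.
 *	flags &= CACHE_CREATE_MASK;
 */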

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
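
/*
 * Usage sketch for the public wrappers around these (illustrative;
 * 'cache' is a hypothetical kmem_cache pointer). Bulk allocation is
 * all-or-nothing: a return of 0 means the whole request failed.
 *
 *	void *objs[16];
 *	int n;
 *
 *	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!n)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(cache, n, objs);
 */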

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
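
/*
 * Typical call shape (an assumed example): fast-path code guards
 * debug-only work behind the static key, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		set_track(s, object, TRACK_ALLOC, addr);
 *
 * where set_track() stands in for whatever the caller does; the branch
 * is patched out entirely while slub_debug is disabled.
 */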

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
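
/*
 * Illustrative lookup of the objcg for a single object 'p' (an assumed
 * example mirroring the hooks below, not a helper defined here):
 *
 *	struct obj_cgroup **objcgs = slab_objcgs(slab);
 *	unsigned int off = obj_to_index(slab->slab_cache, slab, p);
 *	struct obj_cgroup *objcg = objcgs ? objcgs[off] : NULL;
 *
 * One vector slot exists per object in the slab, indexed by object
 * number.
 */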

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup **objcgs;
	struct obj_cgroup *objcg;
	struct slab *slab;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		slab = virt_to_slab(p[i]);
		/* we could be given a kmalloc_large() object, skip those */
		if (!slab)
			continue;

		objcgs = slab_objcgs(slab);
		if (!objcgs)
			continue;

		if (!s_orig)
			s = slab->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
#endif /* CONFIG_SLOB */

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
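
/*
 * Usage sketch (illustrative; 'cachep' is a hypothetical cache, and
 * nr_partial is the SLUB-only field above):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(cachep, node, n)
 *		pr_info("node %d: %lu partial slabs\n", node, n->nr_partial);
 */
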
#endif

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#endif /* MM_SLAB_H */