/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together; this avoids the typical
 * ABA problems with a cmpxchg of just the pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;
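
/*
 * Illustrative sketch (not compiled here) of how mm/slub.c uses this type:
 * the freelist pointer and the counters word are updated as a single atomic
 * double-word, roughly
 *
 *	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
 *	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
 *
 *	ok = try_cmpxchg_freelist(&slab->freelist_counter.full,
 *				  &old.full, new.full);
 *
 * Because the counter changes on every update, a freelist pointer that was
 * freed and reallocated in between cannot be mistaken for an unchanged one.
 */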

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm.  Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is either a compound head or a single order-0 page.
 *
 * Long-term, ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page.
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
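
/*
 * Illustrative sketch of the conversions above (hypothetical helper, not
 * part of this header): code that only has a struct page in hand can hop to
 * the slab and its cache without open-coded casts:
 *
 *	static inline struct kmem_cache *page_slab_cache(struct page *page)
 *	{
 *		struct folio *folio = page_folio(page);
 *
 *		if (!folio_test_slab(folio))
 *			return NULL;
 *		return folio_slab(folio)->slab_cache;
 *	}
 *
 * slab_folio() and slab_page() go the other way when a folio or struct page
 * is needed by code not yet converted to struct slab.
 */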

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}
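
/*
 * Sketch of the intended use (the real logic lives in the allocator proper,
 * e.g. mm/slub.c): mark the slab when its folio came from pfmemalloc
 * reserves and honour that mark on later allocations, roughly
 *
 *	if (folio_is_pfmemalloc(folio))
 *		slab_set_pfmemalloc(slab);
 *	...
 *	if (unlikely(slab_test_pfmemalloc(slab)))
 *		only serve callers entitled to memalloc reserves;
 *
 * so reserve pages are not handed out to ordinary allocations.
 */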

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
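
/*
 * Sketch of the encoding (the actual helpers, oo_make()/oo_order()/
 * oo_objects(), live in mm/slub.c): the order sits in the upper bits and the
 * object count in the lower bits of the single word, roughly
 *
 *	x       = (order << OO_SHIFT) + objects;
 *	order   = x >> OO_SHIFT;
 *	objects = x & OO_MASK;
 *
 * so both values can be read or updated with one plain word access.
 */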

/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}
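
/*
 * Sketch of how the index helpers compose (hypothetical snippet, following
 * the pattern used by e.g. the memcg accounting code): given an object
 * pointer p served from a slab, its per-slab slot is
 *
 *	struct slab *slab = virt_to_slab(p);
 *	unsigned int idx = obj_to_index(s, slab, p);
 *
 * where idx ranges from 0 to objs_per_slab(s, slab) - 1.  reciprocal_divide()
 * turns the division by the object size into a multiplication.
 */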

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
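
/*
 * For example, slab_is_available() (declared in include/linux/slab.h) boils
 * down to a check on this state, roughly
 *
 *	return slab_state >= UP;
 *
 * so early boot code can tell whether kmalloc() may be used yet.
 */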

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation.
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE; the caller must ensure that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	return kmalloc_caches[kmalloc_type(flags, caller)][index];
}
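
/*
 * Sketch of how the kmalloc fast path uses this (the real caller lives in
 * mm/slub.c): a 1000-byte GFP_KERNEL request rounds up to the next
 * power-of-two cache, roughly
 *
 *	struct kmem_cache *s = kmalloc_slab(1000, GFP_KERNEL, _RET_IP_);
 *
 * where index = fls(999) = 10, so with plain GFP_KERNEL this resolves to
 * kmalloc_caches[KMALLOC_NORMAL][10], i.e. the 1024-byte cache.  Sizes up to
 * 192 go through kmalloc_size_index[] instead, which also covers the 96- and
 * 192-byte caches that fls() alone would miss.
 */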

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_MERGE | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
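
/*
 * Sketch of typical use (the pattern follows mm/slub.c): gate a debug-only
 * path on both the static key and the per-cache flag, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		record an alloc/free track for the object;
 *
 * When slub_debug is not enabled at all, the static key keeps this check at
 * effectively zero cost on the fast path.
 */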

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
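
/*
 * Sketch of how the vector is indexed (hypothetical snippet modelled on the
 * memcg charge/uncharge paths): one obj_cgroup pointer per object slot,
 * located via obj_to_index():
 *
 *	struct obj_cgroup **objcgs = slab_objcgs(slab);
 *
 *	if (objcgs) {
 *		unsigned int off = obj_to_index(s, slab, p);
 *		struct obj_cgroup *objcg = objcgs[off];
 *		...
 *	}
 */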

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);
#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_slab)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between the object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
#endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or track
	 * user information, we can only use the space before that
	 * information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Otherwise we can use all the padding etc. for the allocation.
	 */
	return s->size;
}

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
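
/*
 * Sketch of where these predicates matter (the real hooks live in the
 * allocation and free paths of mm/slub.c), roughly:
 *
 *	on allocation:
 *		if (p && slab_want_init_on_alloc(flags, s))
 *			zero the object (or its usable part);
 *
 *	on free:
 *		if (slab_want_init_on_free(s))
 *			wipe the object before it goes back on the freelist;
 *
 * Caches with constructors or SLAB_TYPESAFE_BY_RCU semantics opt out (see
 * the checks above), since wiping would destroy state that must stay valid
 * until reuse.
 */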

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */