xref: /linux/mm/slab.h (revision cb5d9fb38c3434ab6276bac500dfffe78649400b)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
297d06609SChristoph Lameter #ifndef MM_SLAB_H
397d06609SChristoph Lameter #define MM_SLAB_H
497d06609SChristoph Lameter /*
597d06609SChristoph Lameter  * Internal slab definitions
697d06609SChristoph Lameter  */
797d06609SChristoph Lameter 
807f361b2SJoonsoo Kim #ifdef CONFIG_SLOB
907f361b2SJoonsoo Kim /*
1007f361b2SJoonsoo Kim  * Common fields provided in kmem_cache by all slab allocators.
1107f361b2SJoonsoo Kim  * This struct is either used directly by the allocator (SLOB)
1207f361b2SJoonsoo Kim  * or the allocator must include definitions for all fields
1307f361b2SJoonsoo Kim  * provided in kmem_cache_common in its own definition of kmem_cache.
1407f361b2SJoonsoo Kim  *
1507f361b2SJoonsoo Kim  * Once we can do anonymous structs (C11 standard) we could put an
1607f361b2SJoonsoo Kim  * anonymous struct definition in these allocators so that the
1707f361b2SJoonsoo Kim  * separate allocations in the kmem_cache structure of SLAB and
1807f361b2SJoonsoo Kim  * SLUB are no longer needed.
1907f361b2SJoonsoo Kim  */
2007f361b2SJoonsoo Kim struct kmem_cache {
2107f361b2SJoonsoo Kim 	unsigned int object_size;/* The original size of the object */
2207f361b2SJoonsoo Kim 	unsigned int size;	/* The aligned/padded/added on size  */
2307f361b2SJoonsoo Kim 	unsigned int align;	/* Alignment as calculated */
24d50112edSAlexey Dobriyan 	slab_flags_t flags;	/* Active flags on the slab */
257bbdb81eSAlexey Dobriyan 	unsigned int useroffset;/* Usercopy region offset */
267bbdb81eSAlexey Dobriyan 	unsigned int usersize;	/* Usercopy region size */
2707f361b2SJoonsoo Kim 	const char *name;	/* Slab name for sysfs */
2807f361b2SJoonsoo Kim 	int refcount;		/* Use counter */
2907f361b2SJoonsoo Kim 	void (*ctor)(void *);	/* Called on object slot creation */
3007f361b2SJoonsoo Kim 	struct list_head list;	/* List of all slab caches on the system */
3107f361b2SJoonsoo Kim };
3207f361b2SJoonsoo Kim 
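/*
 * Illustrative sketch only: with SLAB or SLUB configured, the definition
 * above is not used; the allocator's own struct kmem_cache (slab_def.h or
 * slub_def.h) must instead carry all of the common fields itself, roughly:
 *
 *	struct kmem_cache {
 *		... allocator-private management fields ...
 *		unsigned int object_size;
 *		unsigned int size;
 *		unsigned int align;
 *		slab_flags_t flags;
 *		unsigned int useroffset;
 *		unsigned int usersize;
 *		const char *name;
 *		int refcount;
 *		void (*ctor)(void *);
 *		struct list_head list;
 *	};
 */
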
339adeaa22SWaiman Long #else /* !CONFIG_SLOB */
349adeaa22SWaiman Long 
359adeaa22SWaiman Long struct memcg_cache_array {
369adeaa22SWaiman Long 	struct rcu_head rcu;
379adeaa22SWaiman Long 	struct kmem_cache *entries[0];
389adeaa22SWaiman Long };
399adeaa22SWaiman Long 
409adeaa22SWaiman Long /*
419adeaa22SWaiman Long  * This is the main placeholder for memcg-related information in kmem caches.
429adeaa22SWaiman Long  * Both the root cache and the child caches will have it. For the root cache,
439adeaa22SWaiman Long  * this will hold a dynamically allocated array large enough to hold
449adeaa22SWaiman Long  * information about the currently limited memcgs in the system. To allow the
459adeaa22SWaiman Long  * array to be accessed without taking any locks, on relocation we free the old
469adeaa22SWaiman Long  * version only after a grace period.
479adeaa22SWaiman Long  *
489adeaa22SWaiman Long  * Root and child caches hold different metadata.
499adeaa22SWaiman Long  *
509adeaa22SWaiman Long  * @root_cache:	Common to root and child caches.  NULL for root, pointer to
519adeaa22SWaiman Long  *		the root cache for children.
529adeaa22SWaiman Long  *
539adeaa22SWaiman Long  * The following fields are specific to root caches.
549adeaa22SWaiman Long  *
559adeaa22SWaiman Long  * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
569adeaa22SWaiman Long  *		used to index child caches during allocation and cleared
579adeaa22SWaiman Long  *		early during shutdown.
589adeaa22SWaiman Long  *
599adeaa22SWaiman Long  * @root_caches_node: List node for slab_root_caches list.
609adeaa22SWaiman Long  *
619adeaa22SWaiman Long  * @children:	List of all child caches.  While the child caches are also
629adeaa22SWaiman Long  *		reachable through @memcg_caches, a child cache remains on
639adeaa22SWaiman Long  *		this list until it is actually destroyed.
649adeaa22SWaiman Long  *
659adeaa22SWaiman Long  * The following fields are specific to child caches.
669adeaa22SWaiman Long  *
679adeaa22SWaiman Long  * @memcg:	Pointer to the memcg this cache belongs to.
689adeaa22SWaiman Long  *
699adeaa22SWaiman Long  * @children_node: List node for @root_cache->children list.
709adeaa22SWaiman Long  *
719adeaa22SWaiman Long  * @kmem_caches_node: List node for @memcg->kmem_caches list.
729adeaa22SWaiman Long  */
739adeaa22SWaiman Long struct memcg_cache_params {
749adeaa22SWaiman Long 	struct kmem_cache *root_cache;
759adeaa22SWaiman Long 	union {
769adeaa22SWaiman Long 		struct {
779adeaa22SWaiman Long 			struct memcg_cache_array __rcu *memcg_caches;
789adeaa22SWaiman Long 			struct list_head __root_caches_node;
799adeaa22SWaiman Long 			struct list_head children;
809adeaa22SWaiman Long 			bool dying;
819adeaa22SWaiman Long 		};
829adeaa22SWaiman Long 		struct {
839adeaa22SWaiman Long 			struct mem_cgroup *memcg;
849adeaa22SWaiman Long 			struct list_head children_node;
859adeaa22SWaiman Long 			struct list_head kmem_caches_node;
869adeaa22SWaiman Long 			struct percpu_ref refcnt;
879adeaa22SWaiman Long 
889adeaa22SWaiman Long 			void (*work_fn)(struct kmem_cache *);
899adeaa22SWaiman Long 			union {
909adeaa22SWaiman Long 				struct rcu_head rcu_head;
919adeaa22SWaiman Long 				struct work_struct work;
929adeaa22SWaiman Long 			};
939adeaa22SWaiman Long 		};
949adeaa22SWaiman Long 	};
959adeaa22SWaiman Long };
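
/*
 * Illustrative sketch only: the layout above is what makes the lock-free
 * child-cache lookup on the allocation fast path possible.  With
 * root_cache and kmemcg_id standing in for the caller's context, the
 * lookup is roughly (compare memcg_kmem_get_cache()):
 *
 *	struct memcg_cache_array *arr;
 *	struct kmem_cache *memcg_cachep;
 *
 *	rcu_read_lock();
 *	arr = rcu_dereference(root_cache->memcg_params.memcg_caches);
 *	memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]);
 *	rcu_read_unlock();
 */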
9607f361b2SJoonsoo Kim #endif /* CONFIG_SLOB */
9707f361b2SJoonsoo Kim 
9807f361b2SJoonsoo Kim #ifdef CONFIG_SLAB
9907f361b2SJoonsoo Kim #include <linux/slab_def.h>
10007f361b2SJoonsoo Kim #endif
10107f361b2SJoonsoo Kim 
10207f361b2SJoonsoo Kim #ifdef CONFIG_SLUB
10307f361b2SJoonsoo Kim #include <linux/slub_def.h>
10407f361b2SJoonsoo Kim #endif
10507f361b2SJoonsoo Kim 
10607f361b2SJoonsoo Kim #include <linux/memcontrol.h>
10711c7aec2SJesper Dangaard Brouer #include <linux/fault-inject.h>
10811c7aec2SJesper Dangaard Brouer #include <linux/kasan.h>
10911c7aec2SJesper Dangaard Brouer #include <linux/kmemleak.h>
1107c00fce9SThomas Garnier #include <linux/random.h>
111d92a8cfcSPeter Zijlstra #include <linux/sched/mm.h>
11207f361b2SJoonsoo Kim 
11397d06609SChristoph Lameter /*
11497d06609SChristoph Lameter  * State of the slab allocator.
11597d06609SChristoph Lameter  *
11697d06609SChristoph Lameter  * This is used to describe the states of the allocator during bootup.
11797d06609SChristoph Lameter  * Allocators use this to gradually bootstrap themselves. Most allocators
11897d06609SChristoph Lameter  * have the problem that the structures used for managing slab caches are
11997d06609SChristoph Lameter  * allocated from slab caches themselves.
12097d06609SChristoph Lameter  */
12197d06609SChristoph Lameter enum slab_state {
12297d06609SChristoph Lameter 	DOWN,			/* No slab functionality yet */
12397d06609SChristoph Lameter 	PARTIAL,		/* SLUB: kmem_cache_node available */
124ce8eb6c4SChristoph Lameter 	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
12597d06609SChristoph Lameter 	UP,			/* Slab caches usable but not all extras yet */
12697d06609SChristoph Lameter 	FULL			/* Everything is working */
12797d06609SChristoph Lameter };
12897d06609SChristoph Lameter 
12997d06609SChristoph Lameter extern enum slab_state slab_state;
13097d06609SChristoph Lameter 
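/*
 * Illustrative sketch only: boot-time callers typically gate slab usage on
 * this state; slab_is_available() boils down to a check like:
 *
 *	return slab_state >= UP;
 */
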
13118004c5dSChristoph Lameter /* The slab cache mutex protects the management structures during changes */
13218004c5dSChristoph Lameter extern struct mutex slab_mutex;
1339b030cb8SChristoph Lameter 
1349b030cb8SChristoph Lameter /* The list of all slab caches on the system */
13518004c5dSChristoph Lameter extern struct list_head slab_caches;
13618004c5dSChristoph Lameter 
1379b030cb8SChristoph Lameter /* The slab cache that manages slab cache information */
1389b030cb8SChristoph Lameter extern struct kmem_cache *kmem_cache;
1399b030cb8SChristoph Lameter 
140af3b5f87SVlastimil Babka /* A table of kmalloc cache names and sizes */
141af3b5f87SVlastimil Babka extern const struct kmalloc_info_struct {
142*cb5d9fb3SPengfei Li 	const char *name[NR_KMALLOC_TYPES];
14355de8b9cSAlexey Dobriyan 	unsigned int size;
144af3b5f87SVlastimil Babka } kmalloc_info[];
145af3b5f87SVlastimil Babka 
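/*
 * Illustrative sketch only: the boot-time kmalloc cache creation loop
 * indexes this table roughly as follows, with idx and flags standing in
 * for the loop context (compare create_kmalloc_caches()):
 *
 *	kmalloc_caches[KMALLOC_NORMAL][idx] = create_kmalloc_cache(
 *			kmalloc_info[idx].name[KMALLOC_NORMAL],
 *			kmalloc_info[idx].size, flags, 0,
 *			kmalloc_info[idx].size);
 */
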
146f97d5f63SChristoph Lameter #ifndef CONFIG_SLOB
147f97d5f63SChristoph Lameter /* Kmalloc array related functions */
14834cc6990SDaniel Sanders void setup_kmalloc_cache_index_table(void);
149d50112edSAlexey Dobriyan void create_kmalloc_caches(slab_flags_t);
1502c59dd65SChristoph Lameter 
1512c59dd65SChristoph Lameter /* Find the kmalloc slab corresponding to a certain size */
1522c59dd65SChristoph Lameter struct kmem_cache *kmalloc_slab(size_t, gfp_t);
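
/*
 * Illustrative sketch only: kmalloc() resolves small sizes through this
 * lookup, roughly:
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc(s, flags);
 */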
153f97d5f63SChristoph Lameter #endif
154f97d5f63SChristoph Lameter 
155f97d5f63SChristoph Lameter 
1569b030cb8SChristoph Lameter /* Functions provided by the slab allocators */
157d50112edSAlexey Dobriyan int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
15897d06609SChristoph Lameter 
15955de8b9cSAlexey Dobriyan struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
16055de8b9cSAlexey Dobriyan 			slab_flags_t flags, unsigned int useroffset,
16155de8b9cSAlexey Dobriyan 			unsigned int usersize);
16245530c44SChristoph Lameter extern void create_boot_cache(struct kmem_cache *, const char *name,
163361d575eSAlexey Dobriyan 			unsigned int size, slab_flags_t flags,
164361d575eSAlexey Dobriyan 			unsigned int useroffset, unsigned int usersize);
16545530c44SChristoph Lameter 
166423c929cSJoonsoo Kim int slab_unmergeable(struct kmem_cache *s);
167f4957d5bSAlexey Dobriyan struct kmem_cache *find_mergeable(unsigned size, unsigned align,
168d50112edSAlexey Dobriyan 		slab_flags_t flags, const char *name, void (*ctor)(void *));
16912220deaSJoonsoo Kim #ifndef CONFIG_SLOB
1702633d7a0SGlauber Costa struct kmem_cache *
171f4957d5bSAlexey Dobriyan __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
172d50112edSAlexey Dobriyan 		   slab_flags_t flags, void (*ctor)(void *));
173423c929cSJoonsoo Kim 
1740293d1fdSAlexey Dobriyan slab_flags_t kmem_cache_flags(unsigned int object_size,
175d50112edSAlexey Dobriyan 	slab_flags_t flags, const char *name,
176423c929cSJoonsoo Kim 	void (*ctor)(void *));
177cbb79694SChristoph Lameter #else
1782633d7a0SGlauber Costa static inline struct kmem_cache *
179f4957d5bSAlexey Dobriyan __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
180d50112edSAlexey Dobriyan 		   slab_flags_t flags, void (*ctor)(void *))
181cbb79694SChristoph Lameter { return NULL; }
182423c929cSJoonsoo Kim 
1830293d1fdSAlexey Dobriyan static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
184d50112edSAlexey Dobriyan 	slab_flags_t flags, const char *name,
185423c929cSJoonsoo Kim 	void (*ctor)(void *))
186423c929cSJoonsoo Kim {
187423c929cSJoonsoo Kim 	return flags;
188423c929cSJoonsoo Kim }
189cbb79694SChristoph Lameter #endif
190cbb79694SChristoph Lameter 
191cbb79694SChristoph Lameter 
192d8843922SGlauber Costa /* Legal flag mask for kmem_cache_create(), for various configurations */
1936d6ea1e9SNicolas Boichat #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
1946d6ea1e9SNicolas Boichat 			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
1955f0d5a3aSPaul E. McKenney 			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
196d8843922SGlauber Costa 
197d8843922SGlauber Costa #if defined(CONFIG_DEBUG_SLAB)
198d8843922SGlauber Costa #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
199d8843922SGlauber Costa #elif defined(CONFIG_SLUB_DEBUG)
200d8843922SGlauber Costa #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
201becfda68SLaura Abbott 			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
202d8843922SGlauber Costa #else
203d8843922SGlauber Costa #define SLAB_DEBUG_FLAGS (0)
204d8843922SGlauber Costa #endif
205d8843922SGlauber Costa 
206d8843922SGlauber Costa #if defined(CONFIG_SLAB)
207d8843922SGlauber Costa #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
208230e9fc2SVladimir Davydov 			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
20975f296d9SLevin, Alexander (Sasha Levin) 			  SLAB_ACCOUNT)
210d8843922SGlauber Costa #elif defined(CONFIG_SLUB)
211d8843922SGlauber Costa #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
21275f296d9SLevin, Alexander (Sasha Levin) 			  SLAB_TEMPORARY | SLAB_ACCOUNT)
213d8843922SGlauber Costa #else
214d8843922SGlauber Costa #define SLAB_CACHE_FLAGS (0)
215d8843922SGlauber Costa #endif
216d8843922SGlauber Costa 
217e70954fdSThomas Garnier /* Common flags available with current configuration */
218d8843922SGlauber Costa #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
219d8843922SGlauber Costa 
220e70954fdSThomas Garnier /* Common flags permitted for kmem_cache_create */
221e70954fdSThomas Garnier #define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
222e70954fdSThomas Garnier 			      SLAB_RED_ZONE | \
223e70954fdSThomas Garnier 			      SLAB_POISON | \
224e70954fdSThomas Garnier 			      SLAB_STORE_USER | \
225e70954fdSThomas Garnier 			      SLAB_TRACE | \
226e70954fdSThomas Garnier 			      SLAB_CONSISTENCY_CHECKS | \
227e70954fdSThomas Garnier 			      SLAB_MEM_SPREAD | \
228e70954fdSThomas Garnier 			      SLAB_NOLEAKTRACE | \
229e70954fdSThomas Garnier 			      SLAB_RECLAIM_ACCOUNT | \
230e70954fdSThomas Garnier 			      SLAB_TEMPORARY | \
231e70954fdSThomas Garnier 			      SLAB_ACCOUNT)
232e70954fdSThomas Garnier 
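/*
 * Illustrative sketch only: kmem_cache_create() is expected to reject
 * unknown flags against SLAB_FLAGS_PERMITTED and then mask what it passes
 * on with CACHE_CREATE_MASK, roughly:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;
 *	flags &= CACHE_CREATE_MASK;
 */
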
233f9e13c0aSShakeel Butt bool __kmem_cache_empty(struct kmem_cache *);
234945cf2b6SChristoph Lameter int __kmem_cache_shutdown(struct kmem_cache *);
23552b4b950SDmitry Safonov void __kmem_cache_release(struct kmem_cache *);
236c9fc5864STejun Heo int __kmem_cache_shrink(struct kmem_cache *);
237c9fc5864STejun Heo void __kmemcg_cache_deactivate(struct kmem_cache *s);
23843486694SRoman Gushchin void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
23941a21285SChristoph Lameter void slab_kmem_cache_release(struct kmem_cache *);
24004f768a3SWaiman Long void kmem_cache_shrink_all(struct kmem_cache *s);
241945cf2b6SChristoph Lameter 
242b7454ad3SGlauber Costa struct seq_file;
243b7454ad3SGlauber Costa struct file;
244b7454ad3SGlauber Costa 
2450d7561c6SGlauber Costa struct slabinfo {
2460d7561c6SGlauber Costa 	unsigned long active_objs;
2470d7561c6SGlauber Costa 	unsigned long num_objs;
2480d7561c6SGlauber Costa 	unsigned long active_slabs;
2490d7561c6SGlauber Costa 	unsigned long num_slabs;
2500d7561c6SGlauber Costa 	unsigned long shared_avail;
2510d7561c6SGlauber Costa 	unsigned int limit;
2520d7561c6SGlauber Costa 	unsigned int batchcount;
2530d7561c6SGlauber Costa 	unsigned int shared;
2540d7561c6SGlauber Costa 	unsigned int objects_per_slab;
2550d7561c6SGlauber Costa 	unsigned int cache_order;
2560d7561c6SGlauber Costa };
2570d7561c6SGlauber Costa 
2580d7561c6SGlauber Costa void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
2590d7561c6SGlauber Costa void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
260b7454ad3SGlauber Costa ssize_t slabinfo_write(struct file *file, const char __user *buffer,
261b7454ad3SGlauber Costa 		       size_t count, loff_t *ppos);
262ba6c496eSGlauber Costa 
263484748f0SChristoph Lameter /*
264484748f0SChristoph Lameter  * Generic implementation of bulk operations.
265484748f0SChristoph Lameter  * These are useful for situations in which the allocator cannot
2669f706d68SJesper Dangaard Brouer  * perform optimizations. In that case segments of the object list
267484748f0SChristoph Lameter  * may be allocated or freed using these operations.
268484748f0SChristoph Lameter  */
269484748f0SChristoph Lameter void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
270865762a8SJesper Dangaard Brouer int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
271484748f0SChristoph Lameter 
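/*
 * Illustrative sketch only: the generic fallbacks are essentially plain
 * loops over the per-object entry points, e.g. for the free side:
 *
 *	void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++)
 *			kmem_cache_free(s, p[i]);
 *	}
 */
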
2726cea1d56SRoman Gushchin static inline int cache_vmstat_idx(struct kmem_cache *s)
2736cea1d56SRoman Gushchin {
2746cea1d56SRoman Gushchin 	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
2756cea1d56SRoman Gushchin 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
2766cea1d56SRoman Gushchin }
2776cea1d56SRoman Gushchin 
27884c07d11SKirill Tkhai #ifdef CONFIG_MEMCG_KMEM
279510ded33STejun Heo 
280510ded33STejun Heo /* List of all root caches. */
281510ded33STejun Heo extern struct list_head		slab_root_caches;
282510ded33STejun Heo #define root_caches_node	memcg_params.__root_caches_node
283510ded33STejun Heo 
284426589f5SVladimir Davydov /*
285426589f5SVladimir Davydov  * Iterate over all memcg caches of the given root cache. The caller must hold
286426589f5SVladimir Davydov  * slab_mutex.
287426589f5SVladimir Davydov  */
288426589f5SVladimir Davydov #define for_each_memcg_cache(iter, root) \
2899eeadc8bSTejun Heo 	list_for_each_entry(iter, &(root)->memcg_params.children, \
2909eeadc8bSTejun Heo 			    memcg_params.children_node)
291426589f5SVladimir Davydov 
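/*
 * Illustrative sketch only, with walk_children() as a made-up callback: a
 * typical walk over the children of a root cache, under slab_mutex as
 * required:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		walk_children(c);
 *	mutex_unlock(&slab_mutex);
 */
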
292ba6c496eSGlauber Costa static inline bool is_root_cache(struct kmem_cache *s)
293ba6c496eSGlauber Costa {
2949eeadc8bSTejun Heo 	return !s->memcg_params.root_cache;
295ba6c496eSGlauber Costa }
2962633d7a0SGlauber Costa 
297b9ce5ef4SGlauber Costa static inline bool slab_equal_or_root(struct kmem_cache *s,
298b9ce5ef4SGlauber Costa 				      struct kmem_cache *p)
299b9ce5ef4SGlauber Costa {
300f7ce3190SVladimir Davydov 	return p == s || p == s->memcg_params.root_cache;
301b9ce5ef4SGlauber Costa }
302749c5415SGlauber Costa 
303749c5415SGlauber Costa /*
304749c5415SGlauber Costa  * We use suffixes to the name in memcg because we can't have caches
305749c5415SGlauber Costa  * created in the system with the same name. But when we print them
306749c5415SGlauber Costa  * locally, it is better to refer to them by the base name.
307749c5415SGlauber Costa  */
308749c5415SGlauber Costa static inline const char *cache_name(struct kmem_cache *s)
309749c5415SGlauber Costa {
310749c5415SGlauber Costa 	if (!is_root_cache(s))
311f7ce3190SVladimir Davydov 		s = s->memcg_params.root_cache;
312749c5415SGlauber Costa 	return s->name;
313749c5415SGlauber Costa }
314749c5415SGlauber Costa 
315943a451aSGlauber Costa static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
316943a451aSGlauber Costa {
317943a451aSGlauber Costa 	if (is_root_cache(s))
318943a451aSGlauber Costa 		return s;
319f7ce3190SVladimir Davydov 	return s->memcg_params.root_cache;
320943a451aSGlauber Costa }
3215dfb4175SVladimir Davydov 
3224d96ba35SRoman Gushchin /*
3234d96ba35SRoman Gushchin  * Expects a pointer to a slab page. Please note that the PageSlab() check
3244d96ba35SRoman Gushchin  * isn't sufficient, as it also returns true for tail pages of compound slab
3254d96ba35SRoman Gushchin  * pages, which do not have the slab_cache pointer set.
326221ec5c0SRoman Gushchin  * So this function assumes that the page can pass the PageSlab() &&
327221ec5c0SRoman Gushchin  * !PageTail() check.
328fb2f2b0aSRoman Gushchin  *
329fb2f2b0aSRoman Gushchin  * The kmem_cache can be reparented asynchronously. The caller must ensure
330fb2f2b0aSRoman Gushchin  * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
3314d96ba35SRoman Gushchin  */
3324d96ba35SRoman Gushchin static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
3334d96ba35SRoman Gushchin {
3344d96ba35SRoman Gushchin 	struct kmem_cache *s;
3354d96ba35SRoman Gushchin 
3364d96ba35SRoman Gushchin 	s = READ_ONCE(page->slab_cache);
3374d96ba35SRoman Gushchin 	if (s && !is_root_cache(s))
338fb2f2b0aSRoman Gushchin 		return READ_ONCE(s->memcg_params.memcg);
3394d96ba35SRoman Gushchin 
3404d96ba35SRoman Gushchin 	return NULL;
3414d96ba35SRoman Gushchin }
3424d96ba35SRoman Gushchin 
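/*
 * Illustrative sketch only: as the comment above notes, the caller must
 * pin the memcg itself, e.g.:
 *
 *	rcu_read_lock();
 *	memcg = memcg_from_slab_page(page);
 *	if (memcg)
 *		... use memcg while still inside the RCU section ...
 *	rcu_read_unlock();
 */
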
3434d96ba35SRoman Gushchin /*
3444d96ba35SRoman Gushchin  * Charge the slab page belonging to the non-root kmem_cache.
3454d96ba35SRoman Gushchin  * Can be called for non-root kmem_caches only.
3464d96ba35SRoman Gushchin  */
347f3ccb2c4SVladimir Davydov static __always_inline int memcg_charge_slab(struct page *page,
348f3ccb2c4SVladimir Davydov 					     gfp_t gfp, int order,
349f3ccb2c4SVladimir Davydov 					     struct kmem_cache *s)
3505dfb4175SVladimir Davydov {
3514d96ba35SRoman Gushchin 	struct mem_cgroup *memcg;
3524d96ba35SRoman Gushchin 	struct lruvec *lruvec;
353f0a3a24bSRoman Gushchin 	int ret;
354f0a3a24bSRoman Gushchin 
355fb2f2b0aSRoman Gushchin 	rcu_read_lock();
356fb2f2b0aSRoman Gushchin 	memcg = READ_ONCE(s->memcg_params.memcg);
357fb2f2b0aSRoman Gushchin 	while (memcg && !css_tryget_online(&memcg->css))
358fb2f2b0aSRoman Gushchin 		memcg = parent_mem_cgroup(memcg);
359fb2f2b0aSRoman Gushchin 	rcu_read_unlock();
360fb2f2b0aSRoman Gushchin 
361fb2f2b0aSRoman Gushchin 	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
362fb2f2b0aSRoman Gushchin 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
363fb2f2b0aSRoman Gushchin 				    (1 << order));
364fb2f2b0aSRoman Gushchin 		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
365fb2f2b0aSRoman Gushchin 		return 0;
366fb2f2b0aSRoman Gushchin 	}
367fb2f2b0aSRoman Gushchin 
3684d96ba35SRoman Gushchin 	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
369f0a3a24bSRoman Gushchin 	if (ret)
370fb2f2b0aSRoman Gushchin 		goto out;
371f0a3a24bSRoman Gushchin 
3724d96ba35SRoman Gushchin 	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
3734d96ba35SRoman Gushchin 	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
3744d96ba35SRoman Gushchin 
3754d96ba35SRoman Gushchin 	/* transfer try_charge() page references to kmem_cache */
376f0a3a24bSRoman Gushchin 	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
3774d96ba35SRoman Gushchin 	css_put_many(&memcg->css, 1 << order);
378fb2f2b0aSRoman Gushchin out:
379fb2f2b0aSRoman Gushchin 	css_put(&memcg->css);
380fb2f2b0aSRoman Gushchin 	return ret;
38127ee57c9SVladimir Davydov }
38227ee57c9SVladimir Davydov 
3834d96ba35SRoman Gushchin /*
3844d96ba35SRoman Gushchin  * Uncharge a slab page belonging to a non-root kmem_cache.
3854d96ba35SRoman Gushchin  * Can be called for non-root kmem_caches only.
3864d96ba35SRoman Gushchin  */
38727ee57c9SVladimir Davydov static __always_inline void memcg_uncharge_slab(struct page *page, int order,
38827ee57c9SVladimir Davydov 						struct kmem_cache *s)
38927ee57c9SVladimir Davydov {
3904d96ba35SRoman Gushchin 	struct mem_cgroup *memcg;
3914d96ba35SRoman Gushchin 	struct lruvec *lruvec;
3924d96ba35SRoman Gushchin 
393fb2f2b0aSRoman Gushchin 	rcu_read_lock();
394fb2f2b0aSRoman Gushchin 	memcg = READ_ONCE(s->memcg_params.memcg);
395fb2f2b0aSRoman Gushchin 	if (likely(!mem_cgroup_is_root(memcg))) {
3964d96ba35SRoman Gushchin 		lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
3974d96ba35SRoman Gushchin 		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
3984d96ba35SRoman Gushchin 		memcg_kmem_uncharge_memcg(page, order, memcg);
399fb2f2b0aSRoman Gushchin 	} else {
400fb2f2b0aSRoman Gushchin 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
401fb2f2b0aSRoman Gushchin 				    -(1 << order));
402fb2f2b0aSRoman Gushchin 	}
403fb2f2b0aSRoman Gushchin 	rcu_read_unlock();
4044d96ba35SRoman Gushchin 
405f0a3a24bSRoman Gushchin 	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
4065dfb4175SVladimir Davydov }
407f7ce3190SVladimir Davydov 
408f7ce3190SVladimir Davydov extern void slab_init_memcg_params(struct kmem_cache *);
409c03914b7SRoman Gushchin extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
410f7ce3190SVladimir Davydov 
41184c07d11SKirill Tkhai #else /* CONFIG_MEMCG_KMEM */
412f7ce3190SVladimir Davydov 
413510ded33STejun Heo /* If !memcg, all caches are root. */
414510ded33STejun Heo #define slab_root_caches	slab_caches
415510ded33STejun Heo #define root_caches_node	list
416510ded33STejun Heo 
417426589f5SVladimir Davydov #define for_each_memcg_cache(iter, root) \
418426589f5SVladimir Davydov 	for ((void)(iter), (void)(root); 0; )
419426589f5SVladimir Davydov 
420ba6c496eSGlauber Costa static inline bool is_root_cache(struct kmem_cache *s)
421ba6c496eSGlauber Costa {
422ba6c496eSGlauber Costa 	return true;
423ba6c496eSGlauber Costa }
424ba6c496eSGlauber Costa 
425b9ce5ef4SGlauber Costa static inline bool slab_equal_or_root(struct kmem_cache *s,
426b9ce5ef4SGlauber Costa 				      struct kmem_cache *p)
427b9ce5ef4SGlauber Costa {
428598a0717SKees Cook 	return s == p;
429b9ce5ef4SGlauber Costa }
430749c5415SGlauber Costa 
431749c5415SGlauber Costa static inline const char *cache_name(struct kmem_cache *s)
432749c5415SGlauber Costa {
433749c5415SGlauber Costa 	return s->name;
434749c5415SGlauber Costa }
435749c5415SGlauber Costa 
436943a451aSGlauber Costa static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
437943a451aSGlauber Costa {
438943a451aSGlauber Costa 	return s;
439943a451aSGlauber Costa }
4405dfb4175SVladimir Davydov 
4414d96ba35SRoman Gushchin static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
4424d96ba35SRoman Gushchin {
4434d96ba35SRoman Gushchin 	return NULL;
4444d96ba35SRoman Gushchin }
4454d96ba35SRoman Gushchin 
446f3ccb2c4SVladimir Davydov static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
447f3ccb2c4SVladimir Davydov 				    struct kmem_cache *s)
4485dfb4175SVladimir Davydov {
4495dfb4175SVladimir Davydov 	return 0;
4505dfb4175SVladimir Davydov }
4515dfb4175SVladimir Davydov 
45227ee57c9SVladimir Davydov static inline void memcg_uncharge_slab(struct page *page, int order,
45327ee57c9SVladimir Davydov 				       struct kmem_cache *s)
45427ee57c9SVladimir Davydov {
45527ee57c9SVladimir Davydov }
45627ee57c9SVladimir Davydov 
457f7ce3190SVladimir Davydov static inline void slab_init_memcg_params(struct kmem_cache *s)
458f7ce3190SVladimir Davydov {
459f7ce3190SVladimir Davydov }
460510ded33STejun Heo 
461c03914b7SRoman Gushchin static inline void memcg_link_cache(struct kmem_cache *s,
462c03914b7SRoman Gushchin 				    struct mem_cgroup *memcg)
463510ded33STejun Heo {
464510ded33STejun Heo }
465510ded33STejun Heo 
46684c07d11SKirill Tkhai #endif /* CONFIG_MEMCG_KMEM */
467b9ce5ef4SGlauber Costa 
468a64b5378SKees Cook static inline struct kmem_cache *virt_to_cache(const void *obj)
469a64b5378SKees Cook {
470a64b5378SKees Cook 	struct page *page;
471a64b5378SKees Cook 
472a64b5378SKees Cook 	page = virt_to_head_page(obj);
473a64b5378SKees Cook 	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
474a64b5378SKees Cook 					__func__))
475a64b5378SKees Cook 		return NULL;
476a64b5378SKees Cook 	return page->slab_cache;
477a64b5378SKees Cook }
478a64b5378SKees Cook 
4796cea1d56SRoman Gushchin static __always_inline int charge_slab_page(struct page *page,
4806cea1d56SRoman Gushchin 					    gfp_t gfp, int order,
4816cea1d56SRoman Gushchin 					    struct kmem_cache *s)
4826cea1d56SRoman Gushchin {
4834d96ba35SRoman Gushchin 	if (is_root_cache(s)) {
4844d96ba35SRoman Gushchin 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
4854d96ba35SRoman Gushchin 				    1 << order);
4864d96ba35SRoman Gushchin 		return 0;
4874d96ba35SRoman Gushchin 	}
4886cea1d56SRoman Gushchin 
4894d96ba35SRoman Gushchin 	return memcg_charge_slab(page, gfp, order, s);
4906cea1d56SRoman Gushchin }
4916cea1d56SRoman Gushchin 
4926cea1d56SRoman Gushchin static __always_inline void uncharge_slab_page(struct page *page, int order,
4936cea1d56SRoman Gushchin 					       struct kmem_cache *s)
4946cea1d56SRoman Gushchin {
4954d96ba35SRoman Gushchin 	if (is_root_cache(s)) {
4964d96ba35SRoman Gushchin 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
4974d96ba35SRoman Gushchin 				    -(1 << order));
4984d96ba35SRoman Gushchin 		return;
4994d96ba35SRoman Gushchin 	}
5004d96ba35SRoman Gushchin 
5016cea1d56SRoman Gushchin 	memcg_uncharge_slab(page, order, s);
5026cea1d56SRoman Gushchin }
5036cea1d56SRoman Gushchin 
504b9ce5ef4SGlauber Costa static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
505b9ce5ef4SGlauber Costa {
506b9ce5ef4SGlauber Costa 	struct kmem_cache *cachep;
507b9ce5ef4SGlauber Costa 
508b9ce5ef4SGlauber Costa 	/*
509b9ce5ef4SGlauber Costa 	 * When kmemcg is not being used, both assignments should return the
510b9ce5ef4SGlauber Costa 	 * same value, but we don't want to pay the assignment price in that
511b9ce5ef4SGlauber Costa 	 * case. If it is not compiled in, the compiler should be smart enough
512b9ce5ef4SGlauber Costa 	 * to not even do the assignment. In that case, slab_equal_or_root
513b9ce5ef4SGlauber Costa 	 * will also be a constant.
514b9ce5ef4SGlauber Costa 	 */
515becfda68SLaura Abbott 	if (!memcg_kmem_enabled() &&
516598a0717SKees Cook 	    !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
517becfda68SLaura Abbott 	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
518b9ce5ef4SGlauber Costa 		return s;
519b9ce5ef4SGlauber Costa 
520a64b5378SKees Cook 	cachep = virt_to_cache(x);
521a64b5378SKees Cook 	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
522598a0717SKees Cook 		  "%s: Wrong slab cache. %s but object is from %s\n",
5232d16e0fdSDaniel Borkmann 		  __func__, s->name, cachep->name);
524598a0717SKees Cook 	return cachep;
525b9ce5ef4SGlauber Costa }
526ca34956bSChristoph Lameter 
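/*
 * Illustrative sketch only: the free paths use cache_from_obj() to
 * (optionally) verify which cache an object really belongs to before
 * freeing, roughly:
 *
 *	cachep = cache_from_obj(cachep, objp);
 *	if (!cachep)
 *		return;
 *	... hand objp back to cachep's internal free path ...
 */
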
52711c7aec2SJesper Dangaard Brouer static inline size_t slab_ksize(const struct kmem_cache *s)
52811c7aec2SJesper Dangaard Brouer {
52911c7aec2SJesper Dangaard Brouer #ifndef CONFIG_SLUB
53011c7aec2SJesper Dangaard Brouer 	return s->object_size;
53111c7aec2SJesper Dangaard Brouer 
53211c7aec2SJesper Dangaard Brouer #else /* CONFIG_SLUB */
53311c7aec2SJesper Dangaard Brouer # ifdef CONFIG_SLUB_DEBUG
53411c7aec2SJesper Dangaard Brouer 	/*
53511c7aec2SJesper Dangaard Brouer 	 * Debugging requires use of the padding between object
53611c7aec2SJesper Dangaard Brouer 	 * and whatever may come after it.
53711c7aec2SJesper Dangaard Brouer 	 */
53811c7aec2SJesper Dangaard Brouer 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
53911c7aec2SJesper Dangaard Brouer 		return s->object_size;
54011c7aec2SJesper Dangaard Brouer # endif
54180a9201aSAlexander Potapenko 	if (s->flags & SLAB_KASAN)
54280a9201aSAlexander Potapenko 		return s->object_size;
54311c7aec2SJesper Dangaard Brouer 	/*
54411c7aec2SJesper Dangaard Brouer 	 * If we need to store the freelist pointer
54511c7aec2SJesper Dangaard Brouer 	 * back there or track user information then we can
54611c7aec2SJesper Dangaard Brouer 	 * only use the space before that information.
54711c7aec2SJesper Dangaard Brouer 	 */
5485f0d5a3aSPaul E. McKenney 	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
54911c7aec2SJesper Dangaard Brouer 		return s->inuse;
55011c7aec2SJesper Dangaard Brouer 	/*
55111c7aec2SJesper Dangaard Brouer 	 * Else we can use all the padding etc for the allocation
55211c7aec2SJesper Dangaard Brouer 	 */
55311c7aec2SJesper Dangaard Brouer 	return s->size;
55411c7aec2SJesper Dangaard Brouer #endif
55511c7aec2SJesper Dangaard Brouer }
55611c7aec2SJesper Dangaard Brouer 
55711c7aec2SJesper Dangaard Brouer static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
55811c7aec2SJesper Dangaard Brouer 						     gfp_t flags)
55911c7aec2SJesper Dangaard Brouer {
56011c7aec2SJesper Dangaard Brouer 	flags &= gfp_allowed_mask;
561d92a8cfcSPeter Zijlstra 
562d92a8cfcSPeter Zijlstra 	fs_reclaim_acquire(flags);
563d92a8cfcSPeter Zijlstra 	fs_reclaim_release(flags);
564d92a8cfcSPeter Zijlstra 
56511c7aec2SJesper Dangaard Brouer 	might_sleep_if(gfpflags_allow_blocking(flags));
56611c7aec2SJesper Dangaard Brouer 
567fab9963aSJesper Dangaard Brouer 	if (should_failslab(s, flags))
56811c7aec2SJesper Dangaard Brouer 		return NULL;
56911c7aec2SJesper Dangaard Brouer 
57045264778SVladimir Davydov 	if (memcg_kmem_enabled() &&
57145264778SVladimir Davydov 	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
57245264778SVladimir Davydov 		return memcg_kmem_get_cache(s);
57345264778SVladimir Davydov 
57445264778SVladimir Davydov 	return s;
57511c7aec2SJesper Dangaard Brouer }
57611c7aec2SJesper Dangaard Brouer 
57711c7aec2SJesper Dangaard Brouer static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
57811c7aec2SJesper Dangaard Brouer 					size_t size, void **p)
57911c7aec2SJesper Dangaard Brouer {
58011c7aec2SJesper Dangaard Brouer 	size_t i;
58111c7aec2SJesper Dangaard Brouer 
58211c7aec2SJesper Dangaard Brouer 	flags &= gfp_allowed_mask;
58311c7aec2SJesper Dangaard Brouer 	for (i = 0; i < size; i++) {
58453128245SAndrey Konovalov 		p[i] = kasan_slab_alloc(s, p[i], flags);
585a2f77575SAndrey Konovalov 		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
58653128245SAndrey Konovalov 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
58711c7aec2SJesper Dangaard Brouer 					 s->flags, flags);
58811c7aec2SJesper Dangaard Brouer 	}
58945264778SVladimir Davydov 
59045264778SVladimir Davydov 	if (memcg_kmem_enabled())
59111c7aec2SJesper Dangaard Brouer 		memcg_kmem_put_cache(s);
59211c7aec2SJesper Dangaard Brouer }
59311c7aec2SJesper Dangaard Brouer 
59444c5356fSChristoph Lameter #ifndef CONFIG_SLOB
595ca34956bSChristoph Lameter /*
596ca34956bSChristoph Lameter  * The slab lists for all objects.
597ca34956bSChristoph Lameter  */
598ca34956bSChristoph Lameter struct kmem_cache_node {
599ca34956bSChristoph Lameter 	spinlock_t list_lock;
600ca34956bSChristoph Lameter 
601ca34956bSChristoph Lameter #ifdef CONFIG_SLAB
602ca34956bSChristoph Lameter 	struct list_head slabs_partial;	/* partial list first, better asm code */
603ca34956bSChristoph Lameter 	struct list_head slabs_full;
604ca34956bSChristoph Lameter 	struct list_head slabs_free;
605bf00bd34SDavid Rientjes 	unsigned long total_slabs;	/* length of all slab lists */
606bf00bd34SDavid Rientjes 	unsigned long free_slabs;	/* length of free slab list only */
607ca34956bSChristoph Lameter 	unsigned long free_objects;
608ca34956bSChristoph Lameter 	unsigned int free_limit;
609ca34956bSChristoph Lameter 	unsigned int colour_next;	/* Per-node cache coloring */
610ca34956bSChristoph Lameter 	struct array_cache *shared;	/* shared per node */
611c8522a3aSJoonsoo Kim 	struct alien_cache **alien;	/* on other nodes */
612ca34956bSChristoph Lameter 	unsigned long next_reap;	/* updated without locking */
613ca34956bSChristoph Lameter 	int free_touched;		/* updated without locking */
614ca34956bSChristoph Lameter #endif
615ca34956bSChristoph Lameter 
616ca34956bSChristoph Lameter #ifdef CONFIG_SLUB
617ca34956bSChristoph Lameter 	unsigned long nr_partial;
618ca34956bSChristoph Lameter 	struct list_head partial;
619ca34956bSChristoph Lameter #ifdef CONFIG_SLUB_DEBUG
620ca34956bSChristoph Lameter 	atomic_long_t nr_slabs;
621ca34956bSChristoph Lameter 	atomic_long_t total_objects;
622ca34956bSChristoph Lameter 	struct list_head full;
623ca34956bSChristoph Lameter #endif
624ca34956bSChristoph Lameter #endif
625ca34956bSChristoph Lameter 
626ca34956bSChristoph Lameter };
627e25839f6SWanpeng Li 
62844c5356fSChristoph Lameter static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
62944c5356fSChristoph Lameter {
63044c5356fSChristoph Lameter 	return s->node[node];
63144c5356fSChristoph Lameter }
63244c5356fSChristoph Lameter 
63344c5356fSChristoph Lameter /*
63444c5356fSChristoph Lameter  * Iterator over all nodes. The body will be executed for each node that has
63544c5356fSChristoph Lameter  * a kmem_cache_node structure allocated (which is true for all online nodes)
63644c5356fSChristoph Lameter  */
63744c5356fSChristoph Lameter #define for_each_kmem_cache_node(__s, __node, __n) \
6389163582cSMikulas Patocka 	for (__node = 0; __node < nr_node_ids; __node++) \
6399163582cSMikulas Patocka 		 if ((__n = get_node(__s, __node)))
64044c5356fSChristoph Lameter 
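/*
 * Illustrative sketch only: a typical per-node walk, e.g. when shrinking
 * or dumping a cache:
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n) {
 *		spin_lock_irq(&n->list_lock);
 *		... inspect or reclaim from the per-node lists ...
 *		spin_unlock_irq(&n->list_lock);
 *	}
 */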
64144c5356fSChristoph Lameter #endif
64244c5356fSChristoph Lameter 
6431df3b26fSVladimir Davydov void *slab_start(struct seq_file *m, loff_t *pos);
644276a2439SWanpeng Li void *slab_next(struct seq_file *m, void *p, loff_t *pos);
645276a2439SWanpeng Li void slab_stop(struct seq_file *m, void *p);
646bc2791f8STejun Heo void *memcg_slab_start(struct seq_file *m, loff_t *pos);
647bc2791f8STejun Heo void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
648bc2791f8STejun Heo void memcg_slab_stop(struct seq_file *m, void *p);
649b047501cSVladimir Davydov int memcg_slab_show(struct seq_file *m, void *p);
6505240ab40SAndrey Ryabinin 
651852d8be0SYang Shi #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
652852d8be0SYang Shi void dump_unreclaimable_slab(void);
653852d8be0SYang Shi #else
654852d8be0SYang Shi static inline void dump_unreclaimable_slab(void)
655852d8be0SYang Shi {
656852d8be0SYang Shi }
657852d8be0SYang Shi #endif
658852d8be0SYang Shi 
65955834c59SAlexander Potapenko void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
66055834c59SAlexander Potapenko 
6617c00fce9SThomas Garnier #ifdef CONFIG_SLAB_FREELIST_RANDOM
6627c00fce9SThomas Garnier int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
6637c00fce9SThomas Garnier 			gfp_t gfp);
6647c00fce9SThomas Garnier void cache_random_seq_destroy(struct kmem_cache *cachep);
6657c00fce9SThomas Garnier #else
6667c00fce9SThomas Garnier static inline int cache_random_seq_create(struct kmem_cache *cachep,
6677c00fce9SThomas Garnier 					unsigned int count, gfp_t gfp)
6687c00fce9SThomas Garnier {
6697c00fce9SThomas Garnier 	return 0;
6707c00fce9SThomas Garnier }
6717c00fce9SThomas Garnier static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
6727c00fce9SThomas Garnier #endif /* CONFIG_SLAB_FREELIST_RANDOM */
6737c00fce9SThomas Garnier 
6746471384aSAlexander Potapenko static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
6756471384aSAlexander Potapenko {
6766471384aSAlexander Potapenko 	if (static_branch_unlikely(&init_on_alloc)) {
6776471384aSAlexander Potapenko 		if (c->ctor)
6786471384aSAlexander Potapenko 			return false;
6796471384aSAlexander Potapenko 		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
6806471384aSAlexander Potapenko 			return flags & __GFP_ZERO;
6816471384aSAlexander Potapenko 		return true;
6826471384aSAlexander Potapenko 	}
6836471384aSAlexander Potapenko 	return flags & __GFP_ZERO;
6846471384aSAlexander Potapenko }
6856471384aSAlexander Potapenko 
6866471384aSAlexander Potapenko static inline bool slab_want_init_on_free(struct kmem_cache *c)
6876471384aSAlexander Potapenko {
6886471384aSAlexander Potapenko 	if (static_branch_unlikely(&init_on_free))
6896471384aSAlexander Potapenko 		return !(c->ctor ||
6906471384aSAlexander Potapenko 			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
6916471384aSAlexander Potapenko 	return false;
6926471384aSAlexander Potapenko }
6936471384aSAlexander Potapenko 
6945240ab40SAndrey Ryabinin #endif /* MM_SLAB_H */
695