/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
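
/*
 * Illustrative sketch (not a helper this file provides): since
 * @memcg_caches is RCU-protected and the old array is only freed after a
 * grace period, a lockless lookup of a child cache by kmemcg ID follows
 * the usual rcu_read_lock()/rcu_dereference() pattern.  @root and
 * @kmemcg_id below are assumptions standing in for the caller's context:
 *
 *	struct memcg_cache_array *arr;
 *	struct kmem_cache *c = NULL;
 *
 *	rcu_read_lock();
 *	arr = rcu_dereference(root->memcg_params.memcg_caches);
 *	if (arr)
 *		c = READ_ONCE(arr->entries[kmemcg_id]);
 *	(use c within the read-side critical section, or pin it first)
 *	rcu_read_unlock();
 */
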
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
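
/*
 * Illustrative sketch: early boot code typically gates slab use on this
 * state rather than probing individual caches; slab_is_available() in
 * slab_common.c is essentially a "slab_state >= UP" test.  A caller that
 * must work both before and after slab bootstrap can do:
 *
 *	if (slab_is_available())
 *		buf = kmalloc(size, GFP_NOWAIT);
 *	else
 *		buf = memblock_alloc(size, SMP_CACHE_BYTES);
 */
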
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
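
/*
 * Illustrative sketch (hypothetical caller): kmalloc_slab() is how the
 * kmalloc() entry points resolve a request size to a backing cache; it
 * returns ZERO_SIZE_PTR for zero-sized requests and NULL for oversized
 * ones, so callers screen the result before allocating:
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc(s, flags);
 */
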
gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
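
/*
 * Illustrative sketch of how the two masks above divide the work (the
 * exact call site is an assumption; checks of this shape live in
 * kmem_cache_create_usercopy() in slab_common.c):
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		(reject the cache creation request outright)
 *	flags &= CACHE_CREATE_MASK;
 *		(silently drop flags that this configuration ignores)
 */
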
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the objects listed
 * in the array may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
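
/*
 * Illustrative usage sketch (hypothetical call site in SLUB): the static
 * key keeps the fast path free of debug overhead, so per-cache debug work
 * is guarded like
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		(record alloc/free tracking for the object, e.g. via
 *		 slub.c's static set_track() helper)
 *
 * Passing a flag that is not part of SLAB_DEBUG_FLAGS trips the
 * VM_WARN_ON_ONCE() above.
 */
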
#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
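
/*
 * Illustrative sketch (the same shape as the walk in
 * kmem_cache_shrink_all(); treat the exact caller as an assumption):
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		__kmem_cache_shrink(c);
 *	mutex_unlock(&slab_mutex);
 */
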
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * Memcg caches get a suffix appended to their name, because the system
 * cannot have two caches with the same name. When printing them locally,
 * however, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

/*
 * Expects a pointer to a slab page. Note that a PageSlab() check alone
 * isn't sufficient, as it also returns true for tail pages of compound
 * slab pages, which do not have the slab_cache pointer set.
 * This function therefore assumes that the page can pass a
 * PageSlab() && !PageTail() check.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}
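
/*
 * Illustrative sketch of honouring the lifetime rule above (hypothetical
 * caller): pin the memcg before leaving the RCU section if it must be
 * used afterwards:
 *
 *	struct mem_cgroup *memcg;
 *
 *	rcu_read_lock();
 *	memcg = memcg_from_slab_page(page);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 *	(... use memcg ...)
 *	if (memcg)
 *		css_put(&memcg->css);
 */
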
/*
 * Charge the slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();

	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    nr_pages);
		percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
		return 0;
	}

	ret = memcg_kmem_charge(memcg, gfp, nr_pages);
	if (ret)
		goto out;

	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages);

	/* transfer try_charge() page references to kmem_cache */
	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
	css_put_many(&memcg->css, nr_pages);
out:
	css_put(&memcg->css);
	return ret;
}

/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	if (likely(!mem_cgroup_is_root(memcg))) {
		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
		memcg_kmem_uncharge(memcg, nr_pages);
	} else {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -nr_pages);
	}
	rcu_read_unlock();

	percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    1 << order);
		return 0;
	}

	return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
		return;
	}

	memcg_uncharge_slab(page, order, s);
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !memcg_kmem_enabled() &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
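
/*
 * Illustrative sketch (this mirrors how the kmem_cache_free() paths in
 * SLAB and SLUB use the helper): the cache actually freed to is the one
 * the object belongs to, not necessarily the one the caller passed in:
 *
 *	s = cache_from_obj(s, objp);
 *	if (unlikely(!s))
 *		return;
 *	(... free objp back to s ...)
 */
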
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
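
/*
 * Illustrative sketch of how an allocator's allocation path brackets the
 * actual object allocation with the two hooks above (a simplified,
 * assumption-marked rendition of the slab_alloc() fast path; the name
 * example_alloc is hypothetical):
 *
 *	static __always_inline void *example_alloc(struct kmem_cache *s,
 *						   gfp_t flags)
 *	{
 *		void *object;
 *
 *		s = slab_pre_alloc_hook(s, flags);
 *		if (!s)
 *			return NULL;	(fault injection hit, or a memcg
 *					 cache could not be selected)
 *
 *		object = (... grab an object from s ...);
 *
 *		slab_post_alloc_hook(s, flags, 1, &object);
 *		return object;
 *	}
 */
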
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
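
/*
 * Illustrative sketch (the same shape as the per-node loops in slub.c;
 * nr_partial is a SLUB-only field):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial_slabs = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial_slabs += n->nr_partial;
 */
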
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#endif /* MM_SLAB_H */