#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned long size;
} kmalloc_info[];

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
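
/*
 * Illustrative sketch (not part of this header): kmalloc_slab() maps a
 * request size to the backing kmalloc cache, so a kmalloc() fast path is
 * expected to use it roughly as below; slab_alloc() here stands in for the
 * allocator's internal allocation routine and is an assumption:
 *
 *	cachep = kmalloc_slab(size, flags);
 *	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 *		return cachep;
 *	return slab_alloc(cachep, flags, _RET_IP_);
 */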

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_NOTRACK | \
			      SLAB_ACCOUNT)
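
/*
 * Sketch of the intended use of the two masks above (illustrative only; the
 * surrounding error handling is an assumption, not code from this file): a
 * cache creation path would reject unknown flags and then mask off those the
 * current configuration cannot honour:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;
 *	flags &= CACHE_CREATE_MASK;
 */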

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, better refer to them with the base name
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}
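
/*
 * Example (sketch only): walking the per-memcg children of a root cache with
 * the iterator above; the caller is assumed to already hold slab_mutex:
 *
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache(c, root)
 *		pr_info("child of %s: %s\n", cache_name(root), c->name);
 */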

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}
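
/*
 * Sketch of the expected calling convention for the two helpers above
 * (illustrative; the page allocation calls are assumptions, not code from
 * this header): a slab page is charged right after it is allocated and
 * uncharged again just before it is handed back to the page allocator:
 *
 *	page = alloc_pages_node(node, flags, order);
 *	if (page && memcg_charge_slab(page, flags, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_uncharge_slab(page, order, s);
 *	__free_pages(page, order);
 */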

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value. But we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
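
/*
 * Example (sketch; mirrors how a free path is expected to use the helper
 * above, the surrounding function body is an assumption):
 *
 *	void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 *	{
 *		cachep = cache_from_obj(cachep, objp);
 *		...release objp back into cachep...
 *	}
 */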

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
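
/*
 * Sketch of how an allocator's hot path is expected to bracket an allocation
 * with the two hooks above (illustrative only; the object acquisition step is
 * elided and the function name is an assumption):
 *
 *	static void *slab_alloc(struct kmem_cache *s, gfp_t flags)
 *	{
 *		void *object;
 *
 *		s = slab_pre_alloc_hook(s, flags);
 *		if (!s)
 *			return NULL;
 *		object = ...take an object from s...;
 *		slab_post_alloc_hook(s, flags, 1, &object);
 *		return object;
 *	}
 */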

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
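
/*
 * Example (sketch, assumes CONFIG_SLUB so that nr_partial exists): counting
 * partial slabs across all nodes of a cache with the iterator above:
 *
 *	int node;
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */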

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */