#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
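/*
 * Illustrative sketch (not part of this header): code that may run early in
 * boot typically gates itself on the bootstrap state, treating the slab
 * allocator as usable only once slab_state has reached UP, e.g.:
 *
 *	if (slab_state >= UP)
 *		... kmem_cache_alloc()/kmalloc() can be used ...
 *	else
 *		... fall back to a bootstrap allocation scheme ...
 */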
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned long size;
} kmalloc_info[];

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
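/*
 * Illustrative sketch (not part of this header): kmalloc_slab() maps a
 * requested size (and GFP flags, e.g. GFP_DMA) to the fixed-size kmalloc
 * cache expected to back that allocation, so a generic kmalloc() path
 * effectively does something like:
 *
 *	struct kmem_cache *s = kmalloc_slab(100, GFP_KERNEL);
 *
 * which is expected to resolve to the next cache large enough, e.g. the
 * "kmalloc-128" entry described by kmalloc_info[].
 */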
/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
	unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_NOTRACK | \
			      SLAB_ACCOUNT)
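/*
 * Illustrative sketch (not part of this header): the common cache creation
 * path is expected to filter caller-supplied flags against the two masks
 * above, roughly:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;
 *	flags &= CACHE_CREATE_MASK;
 *
 * i.e. requests carrying allocator-internal flags are refused outright, and
 * flags not supported by the current configuration are silently dropped.
 */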
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed in
 * the array may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}
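/*
 * Illustrative sketch (not part of this header): with kmemcg enabled, every
 * cache created through kmem_cache_create() acts as a "root" cache, and each
 * memory cgroup allocating from it gets a child copy. Walking the children
 * of a root cache (with slab_mutex held) looks roughly like:
 *
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache(c, root)
 *		... c is one per-memcg child of root ...
 */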
/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}
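/*
 * Illustrative sketch (not part of this header): the RCU read section above
 * only protects the memcg_caches array lookup itself, so a caller that wants
 * to keep using the returned child cache is expected to pin it first, for
 * instance by holding slab_mutex:
 *
 *	mutex_lock(&slab_mutex);
 *	c = cache_from_memcg_idx(root, idx);
 *	if (c)
 *		... c cannot be destroyed while slab_mutex is held ...
 *	mutex_unlock(&slab_mutex);
 *
 * (taking a css reference on the owning cgroup works as well, as noted in the
 * comment above.)
 */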
static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
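/*
 * Illustrative sketch (not part of this header): an allocator is expected to
 * pair these helpers around the lifetime of a slab page backing a per-memcg
 * cache, roughly:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_charge_slab(page, gfp, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *
 * and later, when the slab page is torn down:
 *
 *	memcg_uncharge_slab(page, order, s);
 */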
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
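/*
 * Illustrative sketch (not part of this header): the free paths are expected
 * to use cache_from_obj() to translate the cache passed in by the caller into
 * the cache the object was actually allocated from (for example a per-memcg
 * child cache), roughly:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		... return x to s's freelist ...
 *	}
 */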
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or track user
	 * information, then we can only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
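/*
 * Illustrative sketch (not part of this header): the allocators are expected
 * to bracket their allocation fast paths with these hooks. The pre-hook may
 * redirect the request to a per-memcg cache or fail it (failslab), and the
 * post-hook runs the kmemcheck/kmemleak/KASAN instrumentation and drops the
 * memcg cache reference, roughly:
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	object = ...take an object from s...;
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 */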
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
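/*
 * Illustrative sketch (not part of this header): a typical walk over the
 * per-node structures of a cache looks like the following (which fields of
 * the kmem_cache_node are available depends on whether SLAB or SLUB is
 * built in):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		... inspect or reap n, e.g. its partial slab lists ...
 */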
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */