/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	size_t useroffset;	/* Usercopy region offset */
	size_t usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
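
/*
 * Illustrative note (not part of the original header): callers outside the
 * allocators normally do not test slab_state directly; a helper along the
 * lines of slab_is_available() compares against UP, roughly:
 *
 *	bool slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 *
 * Early boot code can use such a check before issuing kmalloc() calls.
 */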

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
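
/*
 * Illustrative sketch (an assumption about how SLUB uses the two helpers
 * above, not something defined in this header): __kmem_cache_alias() is
 * expected to try reusing a compatible existing cache before a new one is
 * created, roughly:
 *
 *	struct kmem_cache *s;
 *
 *	s = find_mergeable(size, align, flags, name, ctor);
 *	if (s) {
 *		s->refcount++;	share the existing cache
 *		return s;
 *	}
 *	return NULL;		caller then creates a fresh cache
 */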
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned long object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create() */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
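
/*
 * Illustrative note (an assumption about the common cache-creation path, not
 * something defined in this header): the expectation is that
 * kmem_cache_create() rejects requests carrying flags outside
 * SLAB_FLAGS_PERMITTED and masks the remainder with CACHE_CREATE_MASK before
 * the cache is set up, roughly:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;	caller passed an allocator-internal flag
 *	flags &= CACHE_CREATE_MASK;
 */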

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = READ_ONCE(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
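
/*
 * Illustrative sketch (an assumption about how callers use the memcg helpers
 * above, not something defined in this header): walking all child caches of a
 * root cache is only safe under slab_mutex. With "root" standing for some
 * root cache and do_something() a placeholder, that looks roughly like:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		do_something(c);
 *	mutex_unlock(&slab_mutex);
 *
 * Likewise, the allocators are expected to pair memcg_charge_slab() when a
 * slab page is allocated with memcg_uncharge_slab() when that page is freed.
 */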

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
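
/*
 * Illustrative note (assumption, not part of this header): the free paths of
 * the allocators are expected to run the passed-in cache through
 * cache_from_obj() so that an object freed against a root cache is redirected
 * to the per-memcg cache it actually came from, roughly:
 *
 *	s = cache_from_obj(s, x);
 *	then free x against the returned cache s
 */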

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
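
/*
 * Illustrative sketch (an assumption about the allocators' fast paths, not
 * something defined in this header): an allocation is expected to be
 * bracketed by the two hooks above, roughly:
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;	fault injection refused the allocation
 *	object = actual allocation from s;
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 *
 * slab_pre_alloc_hook() may hand back a per-memcg cache different from the
 * one passed in, so the allocation must use its return value.
 */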

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
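
/*
 * Illustrative sketch (an assumption about typical callers, not part of this
 * header): per-node state is usually walked with the iterator above, e.g.
 * summing SLUB's partial-list counts over all nodes of a cache s:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */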

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */