/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;
	union {
		struct list_head slab_list;
		struct {	/* Partial pages */
			struct slab *next;
#ifdef CONFIG_64BIT
			int slabs;	/* Nr of slabs left */
#else
			short int slabs;
#endif
		};
		struct rcu_head rcu_head;
	};
	struct kmem_cache *slab_cache; /* not slob */
	/* Double-word boundary */
	void *freelist;		/* first free object */
	union {
		void *s_mem;		/* slab: first object */
		unsigned long counters;	/* SLUB */
		struct {		/* SLUB */
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};

	union {
		unsigned int active;	/* SLAB */
		int units;		/* SLOB */
	};
	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
SLAB_MATCH(slab_list, slab_list);
SLAB_MATCH(rcu_head, rcu_head);
SLAB_MATCH(slab_cache, slab_cache);
SLAB_MATCH(s_mem, s_mem);
SLAB_MATCH(active, active);
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
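/*
 * A minimal sketch of how the SLAB_MATCH() discipline is meant to be kept
 * (the field names below are hypothetical and only for illustration):
 *
 *	struct slab {
 *		...
 *		unsigned long new_field;
 *	};
 *	SLAB_MATCH(matching_page_field, new_field);
 *
 * Any struct slab field that overlays state still reached through struct
 * page must have its offset pinned by such an assertion, and the final
 * static_assert() above keeps struct slab from outgrowing struct page.
 */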
/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
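/*
 * A short usage sketch of the converters above, assuming a caller that holds
 * a folio and wants slab-level information (the function name is made up
 * purely for illustration):
 *
 *	static const char *folio_cache_name(struct folio *folio)
 *	{
 *		if (!folio_test_slab(folio))
 *			return NULL;
 *		return folio_slab(folio)->slab_cache->name;
 *	}
 *
 * The point is that conversions go through folio_slab()/slab_folio() (or
 * page_slab()/slab_page()) rather than open-coded casts, so the underlying
 * representation can change without touching the callers.
 */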
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
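/*
 * Example of how the helpers above compose, assuming an arbitrary kernel
 * virtual address that may or may not belong to a slab (illustrative only,
 * not an API of this file):
 *
 *	static void describe_slab_ptr(const void *ptr)
 *	{
 *		struct slab *slab = virt_to_slab(ptr);
 *
 *		if (!slab)
 *			return;
 *		pr_info("offset %zu in a %zu-byte slab on node %d\n",
 *			(size_t)(ptr - slab_address(slab)),
 *			slab_size(slab), slab_nid(slab));
 *	}
 *
 * kmem_obj_info(), declared at the end of this file, performs a similar
 * walk when reporting details about a slab object.
 */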
#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
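/*
 * Code that can run before the allocator is fully up typically branches on
 * how far bootstrap has progressed; a common pattern looks roughly like this
 * (sketch only, see also slab_is_available()):
 *
 *	if (slab_state >= UP)
 *		ptr = kzalloc(size, GFP_KERNEL);
 *	else
 *		ptr = memblock_alloc(size, SMP_CACHE_BYTES);
 */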
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif
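/*
 * Rough sketch of how the merging hooks above are used on the common cache
 * creation path (simplified; the real logic lives in mm/slab_common.c and
 * the per-allocator files):
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;	an existing, compatible cache is reused
 *	...otherwise a new kmem_cache is allocated and set up via
 *	   __kmem_cache_create()...
 *
 * find_mergeable() implements the compatibility check and slab_unmergeable()
 * lets caches opt out of merging (debugging, constructors, etc.).
 */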
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
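/*
 * Worked example of how the two masks differ, assuming a SLUB build with
 * CONFIG_SLUB_DEBUG (illustrative, not a statement of policy):
 *
 *	flags = SLAB_HWCACHE_ALIGN | SLAB_RED_ZONE | SLAB_MEM_SPREAD;
 *
 * All three bits are within SLAB_FLAGS_PERMITTED, so cache creation is not
 * rejected outright, but SLAB_MEM_SPREAD is not part of CACHE_CREATE_MASK
 * in this configuration and is silently masked off, while SLAB_RED_ZONE
 * survives only because SLAB_DEBUG_FLAGS contains it here.
 */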
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif
/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp, bool new_page);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_objcgs(page));
	page->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return false;
	}

	*objcgp = objcg;
	return true;
}
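/*
 * Worked example of the charging math above: for a cache with
 * s->size == 64 on a 64-bit kernel, obj_full_size() is
 * 64 + sizeof(struct obj_cgroup *) == 72 bytes, so a bulk allocation of
 * 16 objects from a SLAB_ACCOUNT cache tries to charge 16 * 72 == 1152
 * bytes to the current obj_cgroup up front; if that charge fails, the
 * whole allocation is failed before any object is handed out.
 */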
static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);

			if (!page_objcgs(page) &&
			    memcg_alloc_page_obj_cgroups(page, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_objcgs(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup **objcgs;
	struct obj_cgroup *objcg;
	struct page *page;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		page = virt_to_head_page(p[i]);
		objcgs = page_objcgs_check(page);
		if (!objcgs)
			continue;

		if (!s_orig)
			s = page->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, page, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_page)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_page_obj_cgroups(slab_page(slab), s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_page_obj_cgroups(slab_page(slab));

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}
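/*
 * Sketch of the expected pairing (simplified; the actual calls live in the
 * per-allocator slab page allocation and freeing paths):
 *
 *	slab = ...allocate pages for the cache...;
 *	account_slab(slab, order, s, gfp);
 *	...
 *	unaccount_slab(slab, order, s);
 *	...free the pages...;
 *
 * keeping the node vmstat counters and the memcg object-cgroup vector in
 * sync with the lifetime of the underlying folio.
 */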
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
		return NULL;

	return s;
}
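/*
 * How the two hooks fit together on an allocation path, roughly (a
 * simplified sketch of a fastpath caller, not literal allocator code):
 *
 *	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 *	if (!s)
 *		return NULL;	fault injection or the memcg charge failed
 *	object = ...take an object from the per-cpu/per-node structures...;
 *	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
 *	return object;
 *
 * slab_post_alloc_hook() is defined just below.
 */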
static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
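/*
 * Typical usage of the iterator above (sketch; the caller provides whatever
 * protection the traversal needs):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n) {
 *		...inspect or modify n, e.g. n->nr_partial under SLUB...
 *	}
 */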
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
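/*
 * Worked example of the decision above, with init_on_alloc enabled (e.g.
 * CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y or init_on_alloc=1 on the command line):
 *
 *	- a cache with a constructor is never pre-zeroed, the ctor is
 *	  responsible for the object contents;
 *	- a SLAB_TYPESAFE_BY_RCU or SLAB_POISON cache is zeroed only when
 *	  the caller passed __GFP_ZERO;
 *	- every other cache has its objects zeroed on allocation.
 *
 * With the static key disabled, zeroing happens only for __GFP_ZERO
 * allocations.
 */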
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#endif /* MM_SLAB_H */