/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
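
/*
 * A minimal usage sketch (not authoritative): code outside the slab
 * internals normally checks bootstrap progress via slab_is_available()
 * from mm/slab_common.c, which is assumed to reduce to the ordering of
 * the enum above:
 *
 *	if (slab_state >= UP)
 *		...safe to call kmalloc()/kmem_cache_alloc()...
 */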

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
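
/*
 * A rough sketch of how the merge path above is expected to be used by
 * kmem_cache_create() in mm/slab_common.c (shape only, not the literal code):
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;	(an existing compatible cache is reused)
 *
 * __kmem_cache_alias() in turn relies on find_mergeable(), while
 * slab_unmergeable() filters out caches (debugging, constructors, usercopy
 * regions) that must never be merged.
 */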

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);
#else
static inline
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
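
/*
 * A rough sketch of how these masks are assumed to be applied on the
 * cache-creation path in mm/slab_common.c (shape only):
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		...fail the creation with -EINVAL...
 *	flags &= CACHE_CREATE_MASK;	(drop flags the current
 *					 configuration cannot honour)
 */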

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
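
/*
 * A minimal usage sketch for the public counterparts of these fallbacks,
 * kmem_cache_alloc_bulk()/kmem_cache_free_bulk() declared in <linux/slab.h>
 * ("my_cache" and "objs" are placeholder names):
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		...handle failure, nothing was allocated...
 *	...use the objects...
 *	kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
 */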

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp, bool new_page);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_objcgs(page));
	page->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return false;
	}

	*objcgp = objcg;
	return true;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   enum node_stat_item idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}
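
/*
 * Worked example of the charge/uncharge arithmetic above, assuming a 64-bit
 * kernel (8-byte pointers) and a cache with s->size == 64:
 *
 *	obj_full_size(s) = 64 + sizeof(struct obj_cgroup *) = 72 bytes
 *
 * so memcg_slab_pre_alloc_hook() charges objects * 72 bytes up front, and
 * each object later freed gives 72 bytes back via obj_cgroup_uncharge().
 */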

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	flags &= ~__GFP_ACCOUNT;
	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);

			if (!page_objcgs(page) &&
			    memcg_alloc_page_obj_cgroups(page, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_objcgs(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup **objcgs;
	struct obj_cgroup *objcg;
	struct page *page;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		page = virt_to_head_page(p[i]);
		objcgs = page_objcgs(page);
		if (!objcgs)
			continue;

		if (!s_orig)
			s = page->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, page, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
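
/*
 * Sketch of the per-page bookkeeping used by the two hooks above, using only
 * names already referenced in this file: a slab page holding N objects has a
 * parallel array of N obj_cgroup pointers reachable via page_objcgs(page),
 * and obj_to_index() picks the slot that belongs to a given object:
 *
 *	slab page objects:   [ obj 0 ][ obj 1 ] ... [ obj N-1 ]
 *	page_objcgs(page):   [ objcg ][ objcg ] ... [ objcg   ]
 *
 *	off = obj_to_index(s, page, p[i]);
 *	page_objcgs(page)[off] = objcg;		(set on allocation)
 *	objcgs[off] = NULL;			(cleared on free)
 */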

#else /* CONFIG_MEMCG_KMEM */
static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_page)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline void account_slab_page(struct page *page, int order,
					      struct kmem_cache *s,
					      gfp_t gfp)
{
	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_page_obj_cgroups(page, s, gfp, true);

	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
						struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_page_obj_cgroups(page);

	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}
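
/*
 * Worked example of the node-stat arithmetic above, assuming 4 KiB pages:
 * allocating an order-1 slab page for a SLAB_RECLAIM_ACCOUNT cache adds
 * PAGE_SIZE << 1 = 8192 bytes to NR_SLAB_RECLAIMABLE_B on the page's node,
 * and unaccount_slab_page() subtracts the same amount when the slab page is
 * released, so the per-node counters track whole slab pages, not objects.
 */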

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
		return NULL;

	return s;
}
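
/*
 * A minimal sketch of how an allocator is expected to pair the pre/post
 * hooks (shape of the SLUB allocation path, not the literal code):
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	s = slab_pre_alloc_hook(s, &objcg, size, gfpflags);
 *	if (!s)
 *		return NULL;	(fault injection or memcg charge failed)
 *	...allocate the objects into p[]...
 *	slab_post_alloc_hook(s, objcg, gfpflags, size, p,
 *			     slab_want_init_on_alloc(gfpflags, s));
 */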

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
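
/*
 * A minimal usage sketch for the iterator above (the summed field is the
 * SLUB-only nr_partial member of struct kmem_cache_node shown earlier):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial_slabs = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial_slabs += n->nr_partial;
 */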
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
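
/*
 * Reading the two helpers above: with init_on_alloc enabled, objects are
 * pre-zeroed unless the cache has a constructor (zeroing would clobber the
 * constructed state) or is SLAB_TYPESAFE_BY_RCU/SLAB_POISON, in which case
 * only an explicit __GFP_ZERO request is honoured. A rough sketch of the
 * assumed call sites in the allocators:
 *
 *	init = slab_want_init_on_alloc(gfpflags, s);
 *	slab_post_alloc_hook(s, objcg, gfpflags, size, p, init);
 *
 *	if (slab_want_init_on_free(s))
 *		...zero the object's payload before it returns to the freelist...
 */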

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct page *kp_page;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
#endif

#endif /* MM_SLAB_H */