197d06609SChristoph Lameter #ifndef MM_SLAB_H 297d06609SChristoph Lameter #define MM_SLAB_H 397d06609SChristoph Lameter /* 497d06609SChristoph Lameter * Internal slab definitions 597d06609SChristoph Lameter */ 697d06609SChristoph Lameter 797d06609SChristoph Lameter /* 897d06609SChristoph Lameter * State of the slab allocator. 997d06609SChristoph Lameter * 1097d06609SChristoph Lameter * This is used to describe the states of the allocator during bootup. 1197d06609SChristoph Lameter * Allocators use this to gradually bootstrap themselves. Most allocators 1297d06609SChristoph Lameter * have the problem that the structures used for managing slab caches are 1397d06609SChristoph Lameter * allocated from slab caches themselves. 1497d06609SChristoph Lameter */ 1597d06609SChristoph Lameter enum slab_state { 1697d06609SChristoph Lameter DOWN, /* No slab functionality yet */ 1797d06609SChristoph Lameter PARTIAL, /* SLUB: kmem_cache_node available */ 1897d06609SChristoph Lameter PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */ 1997d06609SChristoph Lameter PARTIAL_L3, /* SLAB: kmalloc size for l3 struct available */ 2097d06609SChristoph Lameter UP, /* Slab caches usable but not all extras yet */ 2197d06609SChristoph Lameter FULL /* Everything is working */ 2297d06609SChristoph Lameter }; 2397d06609SChristoph Lameter 2497d06609SChristoph Lameter extern enum slab_state slab_state; 2597d06609SChristoph Lameter 2618004c5dSChristoph Lameter /* The slab cache mutex protects the management structures during changes */ 2718004c5dSChristoph Lameter extern struct mutex slab_mutex; 289b030cb8SChristoph Lameter 299b030cb8SChristoph Lameter /* The list of all slab caches on the system */ 3018004c5dSChristoph Lameter extern struct list_head slab_caches; 3118004c5dSChristoph Lameter 329b030cb8SChristoph Lameter /* The slab cache that manages slab cache information */ 339b030cb8SChristoph Lameter extern struct kmem_cache *kmem_cache; 349b030cb8SChristoph Lameter 3545906855SChristoph Lameter unsigned long calculate_alignment(unsigned long flags, 3645906855SChristoph Lameter unsigned long align, unsigned long size); 3745906855SChristoph Lameter 38f97d5f63SChristoph Lameter #ifndef CONFIG_SLOB 39f97d5f63SChristoph Lameter /* Kmalloc array related functions */ 40f97d5f63SChristoph Lameter void create_kmalloc_caches(unsigned long); 41*2c59dd65SChristoph Lameter 42*2c59dd65SChristoph Lameter /* Find the kmalloc slab corresponding for a certain size */ 43*2c59dd65SChristoph Lameter struct kmem_cache *kmalloc_slab(size_t, gfp_t); 44f97d5f63SChristoph Lameter #endif 45f97d5f63SChristoph Lameter 46f97d5f63SChristoph Lameter 479b030cb8SChristoph Lameter /* Functions provided by the slab allocators */ 488a13a4ccSChristoph Lameter extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); 4997d06609SChristoph Lameter 5045530c44SChristoph Lameter extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size, 5145530c44SChristoph Lameter unsigned long flags); 5245530c44SChristoph Lameter extern void create_boot_cache(struct kmem_cache *, const char *name, 5345530c44SChristoph Lameter size_t size, unsigned long flags); 5445530c44SChristoph Lameter 552633d7a0SGlauber Costa struct mem_cgroup; 56cbb79694SChristoph Lameter #ifdef CONFIG_SLUB 572633d7a0SGlauber Costa struct kmem_cache * 582633d7a0SGlauber Costa __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, 59cbb79694SChristoph Lameter size_t align, unsigned long flags, void (*ctor)(void *)); 60cbb79694SChristoph Lameter #else 612633d7a0SGlauber Costa static inline struct kmem_cache * 622633d7a0SGlauber Costa __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, 63cbb79694SChristoph Lameter size_t align, unsigned long flags, void (*ctor)(void *)) 64cbb79694SChristoph Lameter { return NULL; } 65cbb79694SChristoph Lameter #endif 66cbb79694SChristoph Lameter 67cbb79694SChristoph Lameter 68d8843922SGlauber Costa /* Legal flag mask for kmem_cache_create(), for various configurations */ 69d8843922SGlauber Costa #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \ 70d8843922SGlauber Costa SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS ) 71d8843922SGlauber Costa 72d8843922SGlauber Costa #if defined(CONFIG_DEBUG_SLAB) 73d8843922SGlauber Costa #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 74d8843922SGlauber Costa #elif defined(CONFIG_SLUB_DEBUG) 75d8843922SGlauber Costa #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 76d8843922SGlauber Costa SLAB_TRACE | SLAB_DEBUG_FREE) 77d8843922SGlauber Costa #else 78d8843922SGlauber Costa #define SLAB_DEBUG_FLAGS (0) 79d8843922SGlauber Costa #endif 80d8843922SGlauber Costa 81d8843922SGlauber Costa #if defined(CONFIG_SLAB) 82d8843922SGlauber Costa #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \ 83d8843922SGlauber Costa SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK) 84d8843922SGlauber Costa #elif defined(CONFIG_SLUB) 85d8843922SGlauber Costa #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \ 86d8843922SGlauber Costa SLAB_TEMPORARY | SLAB_NOTRACK) 87d8843922SGlauber Costa #else 88d8843922SGlauber Costa #define SLAB_CACHE_FLAGS (0) 89d8843922SGlauber Costa #endif 90d8843922SGlauber Costa 91d8843922SGlauber Costa #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS) 92d8843922SGlauber Costa 93945cf2b6SChristoph Lameter int __kmem_cache_shutdown(struct kmem_cache *); 94945cf2b6SChristoph Lameter 95b7454ad3SGlauber Costa struct seq_file; 96b7454ad3SGlauber Costa struct file; 97b7454ad3SGlauber Costa 980d7561c6SGlauber Costa struct slabinfo { 990d7561c6SGlauber Costa unsigned long active_objs; 1000d7561c6SGlauber Costa unsigned long num_objs; 1010d7561c6SGlauber Costa unsigned long active_slabs; 1020d7561c6SGlauber Costa unsigned long num_slabs; 1030d7561c6SGlauber Costa unsigned long shared_avail; 1040d7561c6SGlauber Costa unsigned int limit; 1050d7561c6SGlauber Costa unsigned int batchcount; 1060d7561c6SGlauber Costa unsigned int shared; 1070d7561c6SGlauber Costa unsigned int objects_per_slab; 1080d7561c6SGlauber Costa unsigned int cache_order; 1090d7561c6SGlauber Costa }; 1100d7561c6SGlauber Costa 1110d7561c6SGlauber Costa void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo); 1120d7561c6SGlauber Costa void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s); 113b7454ad3SGlauber Costa ssize_t slabinfo_write(struct file *file, const char __user *buffer, 114b7454ad3SGlauber Costa size_t count, loff_t *ppos); 115ba6c496eSGlauber Costa 116ba6c496eSGlauber Costa #ifdef CONFIG_MEMCG_KMEM 117ba6c496eSGlauber Costa static inline bool is_root_cache(struct kmem_cache *s) 118ba6c496eSGlauber Costa { 119ba6c496eSGlauber Costa return !s->memcg_params || s->memcg_params->is_root_cache; 120ba6c496eSGlauber Costa } 1212633d7a0SGlauber Costa 1222633d7a0SGlauber Costa static inline bool cache_match_memcg(struct kmem_cache *cachep, 1232633d7a0SGlauber Costa struct mem_cgroup *memcg) 1242633d7a0SGlauber Costa { 1252633d7a0SGlauber Costa return (is_root_cache(cachep) && !memcg) || 1262633d7a0SGlauber Costa (cachep->memcg_params->memcg == memcg); 1272633d7a0SGlauber Costa } 128b9ce5ef4SGlauber Costa 1291f458cbfSGlauber Costa static inline void memcg_bind_pages(struct kmem_cache *s, int order) 1301f458cbfSGlauber Costa { 1311f458cbfSGlauber Costa if (!is_root_cache(s)) 1321f458cbfSGlauber Costa atomic_add(1 << order, &s->memcg_params->nr_pages); 1331f458cbfSGlauber Costa } 1341f458cbfSGlauber Costa 1351f458cbfSGlauber Costa static inline void memcg_release_pages(struct kmem_cache *s, int order) 1361f458cbfSGlauber Costa { 1371f458cbfSGlauber Costa if (is_root_cache(s)) 1381f458cbfSGlauber Costa return; 1391f458cbfSGlauber Costa 1401f458cbfSGlauber Costa if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages)) 1411f458cbfSGlauber Costa mem_cgroup_destroy_cache(s); 1421f458cbfSGlauber Costa } 1431f458cbfSGlauber Costa 144b9ce5ef4SGlauber Costa static inline bool slab_equal_or_root(struct kmem_cache *s, 145b9ce5ef4SGlauber Costa struct kmem_cache *p) 146b9ce5ef4SGlauber Costa { 147b9ce5ef4SGlauber Costa return (p == s) || 148b9ce5ef4SGlauber Costa (s->memcg_params && (p == s->memcg_params->root_cache)); 149b9ce5ef4SGlauber Costa } 150749c5415SGlauber Costa 151749c5415SGlauber Costa /* 152749c5415SGlauber Costa * We use suffixes to the name in memcg because we can't have caches 153749c5415SGlauber Costa * created in the system with the same name. But when we print them 154749c5415SGlauber Costa * locally, better refer to them with the base name 155749c5415SGlauber Costa */ 156749c5415SGlauber Costa static inline const char *cache_name(struct kmem_cache *s) 157749c5415SGlauber Costa { 158749c5415SGlauber Costa if (!is_root_cache(s)) 159749c5415SGlauber Costa return s->memcg_params->root_cache->name; 160749c5415SGlauber Costa return s->name; 161749c5415SGlauber Costa } 162749c5415SGlauber Costa 163749c5415SGlauber Costa static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) 164749c5415SGlauber Costa { 165749c5415SGlauber Costa return s->memcg_params->memcg_caches[idx]; 166749c5415SGlauber Costa } 167943a451aSGlauber Costa 168943a451aSGlauber Costa static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) 169943a451aSGlauber Costa { 170943a451aSGlauber Costa if (is_root_cache(s)) 171943a451aSGlauber Costa return s; 172943a451aSGlauber Costa return s->memcg_params->root_cache; 173943a451aSGlauber Costa } 174ba6c496eSGlauber Costa #else 175ba6c496eSGlauber Costa static inline bool is_root_cache(struct kmem_cache *s) 176ba6c496eSGlauber Costa { 177ba6c496eSGlauber Costa return true; 178ba6c496eSGlauber Costa } 179ba6c496eSGlauber Costa 1802633d7a0SGlauber Costa static inline bool cache_match_memcg(struct kmem_cache *cachep, 1812633d7a0SGlauber Costa struct mem_cgroup *memcg) 1822633d7a0SGlauber Costa { 1832633d7a0SGlauber Costa return true; 1842633d7a0SGlauber Costa } 185b9ce5ef4SGlauber Costa 1861f458cbfSGlauber Costa static inline void memcg_bind_pages(struct kmem_cache *s, int order) 1871f458cbfSGlauber Costa { 1881f458cbfSGlauber Costa } 1891f458cbfSGlauber Costa 1901f458cbfSGlauber Costa static inline void memcg_release_pages(struct kmem_cache *s, int order) 1911f458cbfSGlauber Costa { 1921f458cbfSGlauber Costa } 1931f458cbfSGlauber Costa 194b9ce5ef4SGlauber Costa static inline bool slab_equal_or_root(struct kmem_cache *s, 195b9ce5ef4SGlauber Costa struct kmem_cache *p) 196b9ce5ef4SGlauber Costa { 197b9ce5ef4SGlauber Costa return true; 198b9ce5ef4SGlauber Costa } 199749c5415SGlauber Costa 200749c5415SGlauber Costa static inline const char *cache_name(struct kmem_cache *s) 201749c5415SGlauber Costa { 202749c5415SGlauber Costa return s->name; 203749c5415SGlauber Costa } 204749c5415SGlauber Costa 205749c5415SGlauber Costa static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) 206749c5415SGlauber Costa { 207749c5415SGlauber Costa return NULL; 208749c5415SGlauber Costa } 209943a451aSGlauber Costa 210943a451aSGlauber Costa static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) 211943a451aSGlauber Costa { 212943a451aSGlauber Costa return s; 213943a451aSGlauber Costa } 214ba6c496eSGlauber Costa #endif 215b9ce5ef4SGlauber Costa 216b9ce5ef4SGlauber Costa static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) 217b9ce5ef4SGlauber Costa { 218b9ce5ef4SGlauber Costa struct kmem_cache *cachep; 219b9ce5ef4SGlauber Costa struct page *page; 220b9ce5ef4SGlauber Costa 221b9ce5ef4SGlauber Costa /* 222b9ce5ef4SGlauber Costa * When kmemcg is not being used, both assignments should return the 223b9ce5ef4SGlauber Costa * same value. but we don't want to pay the assignment price in that 224b9ce5ef4SGlauber Costa * case. If it is not compiled in, the compiler should be smart enough 225b9ce5ef4SGlauber Costa * to not do even the assignment. In that case, slab_equal_or_root 226b9ce5ef4SGlauber Costa * will also be a constant. 227b9ce5ef4SGlauber Costa */ 228b9ce5ef4SGlauber Costa if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE)) 229b9ce5ef4SGlauber Costa return s; 230b9ce5ef4SGlauber Costa 231b9ce5ef4SGlauber Costa page = virt_to_head_page(x); 232b9ce5ef4SGlauber Costa cachep = page->slab_cache; 233b9ce5ef4SGlauber Costa if (slab_equal_or_root(cachep, s)) 234b9ce5ef4SGlauber Costa return cachep; 235b9ce5ef4SGlauber Costa 236b9ce5ef4SGlauber Costa pr_err("%s: Wrong slab cache. %s but object is from %s\n", 237b9ce5ef4SGlauber Costa __FUNCTION__, cachep->name, s->name); 238b9ce5ef4SGlauber Costa WARN_ON_ONCE(1); 239b9ce5ef4SGlauber Costa return s; 240b9ce5ef4SGlauber Costa } 24197d06609SChristoph Lameter #endif 242